diff --git a/.circleci/config.yml b/.circleci/config.yml index 79f6d2e47c5..6bfb2d013b3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -13,25 +13,22 @@ initWorkingDir: &initWorkingDir GOROOT=$(go env GOROOT) sudo rm -r $(go env GOROOT) sudo mkdir $GOROOT - curl https://dl.google.com/go/go1.12.5.linux-amd64.tar.gz | sudo tar xz -C $GOROOT --strip-components=1 + LATEST=$(curl -s https://golang.org/VERSION?m=text) + curl https://dl.google.com/go/${LATEST}.linux-amd64.tar.gz | sudo tar xz -C $GOROOT --strip-components=1 integrationDefaults: &integrationDefaults machine: image: ubuntu-1604:201903-01 working_directory: ~/go/src/${CIRCLE_PROJECT_USERNAME}/coredns environment: - - K8S_VERSION: v1.13.3 - - KUBECONFIG: /home/circleci/.kube/config - - MINIKUBE_VERSION: v0.33.1 - - MINIKUBE_WANTUPDATENOTIFICATION: false - - MINIKUBE_WANTREPORTERRORPROMPT: false - - CHANGE_MINIKUBE_NONE_USER: true - - MINIKUBE_HOME: /home/circleci + - K8S_VERSION: v1.15.3 + - KIND_VERSION: v0.5.1 + - KUBECONFIG: /home/circleci/.kube/kind-config-kind setupKubernetes: &setupKubernetes - run: name: Setup Kubernetes - command: ~/go/src/${CIRCLE_PROJECT_USERNAME}/ci/build/kubernetes/minikube_setup.sh + command: ~/go/src/${CIRCLE_PROJECT_USERNAME}/ci/build/kubernetes/k8s_setup.sh buildCoreDNSImage: &buildCoreDNSImage - run: @@ -40,8 +37,7 @@ buildCoreDNSImage: &buildCoreDNSImage cd ~/go/src/${CIRCLE_PROJECT_USERNAME}/coredns make coredns SYSTEM="GOOS=linux" && \ docker build -t coredns . && \ - docker tag coredns localhost:5000/coredns && \ - docker push localhost:5000/coredns + kind load docker-image coredns jobs: kubernetes-tests: @@ -60,7 +56,7 @@ jobs: name: Run Kubernetes tests command: | cd ~/go/src/${CIRCLE_PROJECT_USERNAME}/ci/test/kubernetes - GO111MODULE=on go test -v ./... + go test -v ./... workflows: version: 2 diff --git a/.dreck.yaml b/.dreck.yaml new file mode 100644 index 00000000000..12c8a4c4be8 --- /dev/null +++ b/.dreck.yaml @@ -0,0 +1,11 @@ +features: + - aliases + - exec + +aliases: + - | + /plugin (.*) -> /label plugin/$1 + - | + /wai -> /label works as intended + - | + /release (.*) -> /exec /opt/bin/release-coredns $1 diff --git a/CODE-OF-CONDUCT.md b/.github/CODE_OF_CONDUCT.md similarity index 100% rename from CODE-OF-CONDUCT.md rename to .github/CODE_OF_CONDUCT.md diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md deleted file mode 120000 index 44fcc634393..00000000000 --- a/.github/CONTRIBUTING.md +++ /dev/null @@ -1 +0,0 @@ -../CONTRIBUTING.md \ No newline at end of file diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 00000000000..eeb644cfcc0 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing to CoreDNS + +Welcome! Our community focuses on helping others and making CoreDNS the best it can be. We gladly +accept contributions and encourage you to get involved! + +## Bug Reports + +First, please [search this +repository](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with a variety +of keywords to ensure your bug is not already reported. + +If not, [open an issue](https://github.com/coredns/coredns/issues) and answer the questions so we +can understand and reproduce the problematic behavior. + +The burden is on you to convince us that it is actually a bug in CoreDNS. This is easiest to do when +you write clear, concise instructions so we can reproduce the behavior (even if it seems obvious). 
+The more detailed and specific you are, the faster we will be able to help you. Check out [How to
+Report Bugs Effectively](https://www.chiark.greenend.org.uk/~sgtatham/bugs.html).
+
+Please be kind. :smile: Remember that CoreDNS comes at no cost to you, and you're getting free help.
+
+## Minor Improvements and New Tests
+
+Submit [pull requests](https://github.com/coredns/coredns/pulls) at any time. Make sure to write
+tests to assert your change is working properly and is thoroughly covered.
+
+## New Features
+
+First, please [search](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with
+a variety of keywords to ensure your suggestion/proposal is new.
+
+Please also check for existing pull requests to see if someone is already working on this. We want
+to avoid duplication of effort.
+
+If the proposal is new and no one has opened a pull request yet, you may open either an issue or a
+pull request for discussion and feedback.
+
+If you are going to spend significant time implementing code for a pull request, it is best to open
+an issue first to "claim" it and get feedback before you invest a lot of time.
+
+**If someone already opened a pull request, but you think the pull request has stalled and you would
+like to open another pull request for the same or similar feature, get some of the maintainers (see
+[CODEOWNERS](CODEOWNERS)) involved to resolve the situation and move things forward.**
+
+Keep each pull request as small as possible, or submit multiple pull requests to complete a
+feature. Smaller means: easier to understand and review. This in turn means things can be merged
+faster.
+
+## New Plugins
+
+A new plugin is (usually) about 1000 lines of Go. This includes tests and some plugin boilerplate.
+This is a considerable amount of code and will take time to review. To prevent too much back and
+forth it is advisable to start with the plugin's `README.md`; this will be its main documentation
+and will help nail down the correct name of the plugin and its various config options.
+
+From there it can work its way through the rest (`setup.go`, the `ServeDNS` handler function, etc.).
+Doing this will help the reviewers, as each chunk of code is relatively small.
+
+Also read [plugin.md](https://raw.githubusercontent.com/coredns/coredns/master/plugin.md) for
+advice on how to write a plugin.
+
+## Updating Dependencies
+
+We use [Go Modules](https://github.com/golang/go/wiki/Modules) as the tool to manage vendor dependencies.
+
+Use the following to update the versions of all dependencies:
+```sh
+$ go get -u
+```
+
+After the dependencies have been updated or added, you might run the following to
+clean up the go module files:
+```sh
+$ go mod tidy
+```
+
+Please refer to [Go Modules](https://github.com/golang/go/wiki/Modules) for more details.
+
+# Thank You
+
+Thanks for your help! CoreDNS would not be what it is today without your contributions.
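The "Updating Dependencies" section added above replaces the old `dep` workflow with Go modules, and boils down to a two-command flow. A minimal end-to-end sketch, assuming a local checkout of the repository and a module-aware Go toolchain (1.12 or later); the final build step is an extra sanity check, not something the contributed text prescribes:

```sh
cd coredns      # assumes the repository has already been cloned
go get -u       # update direct and indirect dependencies (documented above)
go mod tidy     # drop unused requirements and regenerate go.sum (documented above)
go build ./...  # assumption: verify everything still compiles after the bump
```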
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index a02762671e1..00000000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,8 +0,0 @@ - diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 00000000000..414c7f9298f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,27 @@ +--- +name: Bug Report +about: Report a bug encountered while using CoreDNS +labels: bug + +--- + + + +**What happened**: + +**What you expected to happen**: + +**How to reproduce it (as minimally and precisely as possible)**: + +**Anything else we need to know?**: + +**Environment**: + +- the version of CoreDNS: +- Corefile: +- logs, if applicable: +- OS (e.g: `cat /etc/os-release`): +- Others: diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md new file mode 100644 index 00000000000..d39c37c0109 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement.md @@ -0,0 +1,11 @@ +--- +name: Enhancement Request +about: Suggest an enhancement to the CoreDNS project +labels: enhancement + +--- + + +**What would you like to be added**: + +**Why is this needed**: diff --git a/SECURITY-RELEASE-PROCESS.md b/.github/SECURITY.md similarity index 100% rename from SECURITY-RELEASE-PROCESS.md rename to .github/SECURITY.md diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index ebcc73a54a6..00000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,19 +0,0 @@ -# Number of days of inactivity before an issue becomes stale -daysUntilStale: 100 -# Number of days of inactivity before a stale issue is closed -daysUntilClose: 21 -# Issues with these labels will never be considered stale -exemptLabels: - - pinned - - security - - later - - bug -# Label to use when marking an issue as stale -staleLabel: wontfix-stalebot -# Comment to post when marking an issue as stale. Set to `false` to disable -markComment: > - This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. Thank you - for your contributions. -# Comment to post when closing a stale issue. Set to `false` to disable -closeComment: false diff --git a/.github/workflows/go.tidy.yml b/.github/workflows/go.tidy.yml new file mode 100644 index 00000000000..3c6aa2be23d --- /dev/null +++ b/.github/workflows/go.tidy.yml @@ -0,0 +1,43 @@ +name: go tidy + +on: + push: + branches: + - 'master' + paths: + - '.github/workflows/go.tidy.yml' + - 'go.mod' + - 'go.sum' + +jobs: + fix: + runs-on: ubuntu-latest + steps: + - + name: Checkout + uses: actions/checkout@v1 + - + # https://github.com/actions/checkout/issues/6 + name: Fix detached HEAD + run: git checkout ${GITHUB_REF#refs/heads/} + - + name: Tidy + run: | + rm -f go.sum + go mod tidy + - + name: Set up Git + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + git config user.name "coredns-auto-go-mod-tidy[bot]" + git config user.email "coredns-auto-go-mod-tidy[bot]@users.noreply.github.com" + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git + - + name: Commit and push changes + run: | + git add . + if output=$(git status --porcelain) && [ ! 
-z "$output" ]; then + git commit -m 'auto go mod tidy' + git push + fi diff --git a/.gitignore b/.gitignore index 89971ccd0cb..e4585f723b3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,7 @@ query.log Corefile *.swp -coredns +^coredns$ coredns.exe coredns.exe~ kubectl @@ -13,3 +13,5 @@ coverage.txt .classpath .project .settings/** +build/ +release/ diff --git a/.release b/.release deleted file mode 100644 index 2c0733315e4..00000000000 --- a/.release +++ /dev/null @@ -1 +0,0 @@ -3.11 diff --git a/.travis.yml b/.travis.yml index 66b321739da..e10ff20c5ee 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,9 @@ dist: xenial - +services: + - docker language: go go: - - "1.12.x" + - "1.13.x" cache: directories: @@ -18,10 +19,16 @@ branches: - master env: - - TEST_TYPE=coverage - - TEST_TYPE=integration - - TEST_TYPE=core - - TEST_TYPE=plugin + global: + # This is FUZZIT_API_KEY + - secure: "IGpZAyt1e5BZ1C4LeJG+GrgFZzaBQkJ3BX/+MwWN85aJSDk5gwThS53OCr/7RFgBKBgP9xBv9i9hAv0PxVaRE0ETIzjc0rQzceJIWiYKfFYQyscFahKfSiGsWP32rMlU3K67tA7yITS+Z8mMyVH9Ndr1Fg9AmLL+WfATdrd6dP8hzsUpaghKlnJee9TycrfamDpISzecdOY9xzxcwRyphZxuCc/n236Nt7f7Ccz0zx/Qa5igX6mjKZpUyBpS2u02GmNJTfc3W5SbTRP5bSJ+ozSkZZyG3tTpYmeN87AQJ/oG7rUEzqGLt78i7jSYAXghJZT06H/fHFsOKssCj1m0hYiarnGoGzXScLDqp2fpkyzilsUT+W0VgXTy2Nq+88Sideiy6UwDwpqHr5ktyoYFeSVB/aCTJl5oxDxBqs9dfeJSEAy7/AYy8kJoIE/yPYsBnGw10CAED4Rf5mfDgstkZRBdAO0xLBihkPsgza2975DVf27YSjJZ4eKrnR+G/aNCKycLQvWD/5c2bcLCJqyz0uMLQC/4LspS9b5bAKurzqFRdrD5q78NDcbodHelc7zBlFrRwGFCUjXTbQoU6r+1FA8y2Z+n1bd7mIF1JBVHurYAygyYXOcry870hyucGojonvdgBvHp6txeYyPU14VvTNwkF2mddpBCvoSTSPZ5X64=" + matrix: + - TEST_TYPE=coverage + - TEST_TYPE=integration + - TEST_TYPE=core + - TEST_TYPE=plugin +# - TEST_TYPE=fuzzit FUZZIT_TYPE=local-regression +# - TEST_TYPE=fuzzit FUZZIT_TYPE=fuzzing # In the Travis VM-based build environment, IPv6 networking is not # enabled by default. The sysctl operations below enable IPv6. diff --git a/ADOPTERS.md b/ADOPTERS.md index a67917f54b0..22abc9aefb4 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -9,7 +9,7 @@ * [Z Lab](https://zlab.co.jp) uses CoreDNS in production combination with Consul and Kubernetes Clusters. * [Serpro/estaleiro](estaleiro.serpro.gov.br) uses CoreDNS as Kubernetes' DNS Server, in production with tuned Kubernetes plugin options * [Lumo](https://thinklumo.com) uses CoreDNS as Kubernetes' DNS Server, in production and lab with default configuration -* [Booming Games](https://booming-games.com) uses CoreDNS in multiple Kubernetes clusters, with Federation plugin. expect going to production soon. +* [Booming Games](https://booming-games.com) uses CoreDNS in multiple Kubernetes clusters, with Federation plugin. expect to go into production soon. * [Sodimac](https://www.sodimac.cl) uses CoreDNS with Kubernetes in production with default configuration. * [Bose](https://www.bose.com/) uses CoreDNS with Kubernetes in production on very large cluster (over 250 nodes) * [farmotive](https://farmotive.io) uses CoreDNS in Kubernetes using default configuration, in its Lab. Expect to be in production soon. 
diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000000..520a86ec269 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,52 @@ +# @miekg, miek@miek.nl, project lead: 11/11/2020 + +* @bradbeam @chrisohaver @dilyevsky @fastest963 @greenpau @grobie @isolus @johnbelamaric @miekg @pmoroney @rajansandeep @stp-ip @superq @yongtang + +/plugin/pkg/ @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +/coremain/ @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +/core/ @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +/request/ @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +/plugin/* @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +go.sum @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +go.mod @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip + +/plugin/acl/ @miekg @ihac +/plugin/any/ @miekg +/plugin/auto/ @miekg @stp-ip +/plugin/autopath/ @chrisohaver @miekg +/plugin/azure/ @miekg @yongtang @darshanime +/plugin/bind/ @miekg +/plugin/bufsize/ @ykhr53 +/plugin/cache/ @grobie @miekg +/plugin/cancel/ @miekg +/plugin/chaos/ @miekg +/plugin/clouddns/ @miekg @yongtang +/plugin/dnssec/ @isolus @miekg +/plugin/dnstap/ @varyoo @yongtang +/plugin/erratic/ @miekg +/plugin/errors/ @miekg +/plugin/etcd/ @miekg @nitisht +/plugin/file/ @miekg @yongtang @stp-ip +/plugin/forward/ @grobie @johnbelamaric @miekg @rdrozhdzh +/plugin/grpc/ @inigohu @miekg +/plugin/health/ @fastest963 @miekg +/plugin/hosts/ @johnbelamaric @pmoroney +/plugin/k8s_external/ @miekg +/plugin/kubernetes/ @bradbeam @chrisohaver @johnbelamaric @miekg @rajansandeep @yongtang +/plugin/loadbalance/ @miekg +/plugin/log/ @miekg @nchrisdk +/plugin/loop/ @miekg @chrisohaver +/plugin/metadata/ @ekleiner @miekg +/plugin/metrics/ @fastest963 @miekg @superq @greenpau +/plugin/nsid/ @yongtang +/plugin/pprof/ @miekg +/plugin/reload/ @johnbelamaric +/plugin/rewrite/ @greenpau @johnbelamaric +/plugin/root/ @miekg +/plugin/route53/ @yongtang @dilyevsky +/plugin/secondary/ @bradbeam @miekg +/plugin/template/ @rtreffer +/plugin/tls/ @johnbelamaric +/plugin/trace/ @johnbelamaric +/plugin/transfer/ @miekg @chrisohaver +/plugin/whoami/ @miekg @chrisohaver diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 120000 index 00000000000..637237e0964 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +.github/CODE_OF_CONDUCT.md \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index bb38013479c..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,65 +0,0 @@ -# Contributing to CoreDNS - -Welcome! Our community focuses on helping others and making CoreDNS the best it can be. We gladly -accept contributions and encourage you to get involved! - -## Bug Reports - -First, please [search this -repository](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with a variety -of keywords to ensure your bug is not already reported. - -If not, [open an issue](https://github.com/coredns/coredns/issues) and answer the questions so we -can understand and reproduce the problematic behavior. - -The burden is on you to convince us that it is actually a bug in CoreDNS. This is easiest to do when -you write clear, concise instructions so we can reproduce the behavior (even if it seems obvious). -The more detailed and specific you are, the faster we will be able to help you. Check out [How to -Report Bugs Effectively](https://www.chiark.greenend.org.uk/~sgtatham/bugs.html). - -Please be kind. 
:smile: Remember that CoreDNS comes at no cost to you, and you're getting free help. - -## Minor Improvements and New Tests - -Submit [pull requests](https://github.com/coredns/coredns/pulls) at any time. Make sure to write -tests to assert your change is working properly and is thoroughly covered. - -## New Features - -First, please [search](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with -a variety of keywords to ensure your suggestion/proposal is new. - -If so, you may open either an issue or a pull request for discussion and feedback. - -If you are going to spend significant time implementing code for a pull request, best to open an -issue first and "claim" it and get feedback before you invest a lot of time. - -If possible make a pull request as small as possible, or submit multiple pull request to complete a -feature. Smaller means: easier to understand and review. This in turn means things can be merged -faster. - -## Updating Dependencies - -We use Golang's [`dep`](https://github.com/golang/dep) as the tool to manage vendor dependencies. -The tool could be obtained through: - -```sh -$ go get -u github.com/golang/dep/cmd/dep -``` - -Use the following to update the locked versions of all dependencies -```sh -$ make dep-ensure -``` - -After the dependencies have been updated or added, you might run the following to -prune vendored packages: -```sh -$ dep prune -``` - -Please refer to Golang's [`dep`](https://github.com/golang/dep) for more details. - -# Thank You - -Thanks for your help! CoreDNS would not be what it is today without your contributions. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 120000 index 00000000000..784ef485bfc --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1 @@ +.github/CONTRIBUTING.md \ No newline at end of file diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 3171557b0ee..36c531b582d 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -22,8 +22,8 @@ The term of the project lead is one year, with no term limit restriction. The project lead is elected by CoreDNS maintainers according to an individual's technical merit to CoreDNS project. -The current project lead is identified in the top level [OWNERS](OWNERS) file with the string -`project lead` and the term behind the name. +The current project lead is identified in the [CODEOWNERS](CODEOWNERS) file with the string +`project lead` and the term behind the name in a comment at the top of the file. ## Expectations from Maintainers @@ -35,11 +35,9 @@ participate in Pull Request reviews. Maintainers are expected to respond to assi in a *reasonable* time frame, either providing insights, or assign the Pull Requests to other maintainers. -Every Maintainer is listed in the top-level [OWNERS](https://github.com/coredns/coredns/blob/master/OWNERS) -file, with their Github handle and a possibly obfuscated email address. Everyone in the -`approvers` list is a Maintainer. - -A Maintainer is also listed in a plugin specific OWNERS file. +Every Maintainer is listed in the +[CODEOWNERS](https://github.com/coredns/coredns/blob/master/CODEOWNERS) +file, with their Github handle. A Maintainer should be a member of `maintainers@coredns.io`, although this is not a hard requirement. @@ -66,7 +64,7 @@ Changes in project lead or term is initiated by opening a github PR. Anyone from CoreDNS community can vote on the PR with either +1 or -1. 
Only the following votes are binding: -1) Any maintainer that has been listed in the top-level [OWNERS](OWNERS) file before the PR is opened. +1) Any maintainer that has been listed in the [CODEOWNERS](CODEOWNERS) file before the PR is opened. 2) Any maintainer from an organization may cast the vote for that organization. However, no organization should have more binding votes than 1/5 of the total number of maintainers defined in 1). diff --git a/Makefile b/Makefile index 3f538eb57af..161997ee36d 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ all: coredns .PHONY: coredns coredns: $(CHECKS) - GO111MODULE=on CGO_ENABLED=$(CGO_ENABLED) $(SYSTEM) go build $(BUILDOPTS) -ldflags="-s -w -X github.com/coredns/coredns/coremain.GitCommit=$(GITCOMMIT)" -o $(BINARY) + CGO_ENABLED=$(CGO_ENABLED) $(SYSTEM) go build $(BUILDOPTS) -ldflags="-s -w -X github.com/coredns/coredns/coremain.GitCommit=$(GITCOMMIT)" -o $(BINARY) .PHONY: check check: presubmit core/plugin/zplugin.go core/dnsserver/zdirectives.go @@ -22,35 +22,48 @@ check: presubmit core/plugin/zplugin.go core/dnsserver/zdirectives.go .PHONY: travis travis: ifeq ($(TEST_TYPE),core) - ( cd request ; GO111MODULE=on go test -v -race ./... ) - ( cd core ; GO111MODULE=on go test -v -race ./... ) - ( cd coremain ; GO111MODULE=on go test -v -race ./... ) + ( cd request; go test -race ./... ) + ( cd core; go test -race ./... ) + ( cd coremain; go test -race ./... ) endif ifeq ($(TEST_TYPE),integration) - ( cd test ; GO111MODULE=on go test -v -race ./... ) + ( cd test; go test -race ./... ) endif ifeq ($(TEST_TYPE),plugin) - ( cd plugin ; GO111MODULE=on go test -v -race ./... ) + ( cd plugin; go test -race ./... ) endif ifeq ($(TEST_TYPE),coverage) for d in `go list ./... | grep -v vendor`; do \ t=$$(date +%s); \ - GO111MODULE=on go test -i -coverprofile=cover.out -covermode=atomic $$d || exit 1; \ - GO111MODULE=on go test -v -coverprofile=cover.out -covermode=atomic $$d || exit 1; \ - echo "Coverage test $$d took $$(($$(date +%s)-t)) seconds"; \ + go test -i -coverprofile=cover.out -covermode=atomic $$d || exit 1; \ + go test -coverprofile=cover.out -covermode=atomic $$d || exit 1; \ if [ -f cover.out ]; then \ - cat cover.out >> coverage.txt; \ - rm cover.out; \ + cat cover.out >> coverage.txt && rm cover.out; \ fi; \ done endif +ifeq ($(TEST_TYPE),fuzzit) + # skip fuzzing for PR + if [ "$(TRAVIS_PULL_REQUEST)" = "false" ] || [ "$(FUZZIT_TYPE)" = "local-regression" ] ; then \ + export GO111MODULE=off; \ + go get -u github.com/dvyukov/go-fuzz/go-fuzz-build; \ + go get -u -v .; \ + cd ../../go-acme/lego && git checkout v2.5.0; \ + cd ../../coredns/coredns; \ + LIBFUZZER=YES $(MAKE) -f Makefile.fuzz all; \ + $(MAKE) -sf Makefile.fuzz fuzzit; \ + for i in `$(MAKE) -sf Makefile.fuzz echo`; do echo $$i; \ + ./fuzzit create job --type $(FUZZIT_TYPE) coredns/$$i ./$$i; \ + done; \ + fi; +endif core/plugin/zplugin.go core/dnsserver/zdirectives.go: plugin.cfg - GO111MODULE=on go generate coredns.go + go generate coredns.go .PHONY: gen gen: - GO111MODULE=on go generate coredns.go + go generate coredns.go .PHONY: pb pb: @@ -63,9 +76,16 @@ presubmit: .PHONY: clean clean: - GO111MODULE=on go clean + go clean rm -f coredns +.PHONY: dep-ensure +dep-ensure: + dep version || go get -u github.com/golang/dep/cmd/dep + dep ensure -v + dep prune -v + find vendor -name '*_test.go' -delete + .PHONY: test test: check ( cd request ; go test -v -race ./... ) @@ -73,3 +93,4 @@ test: check ( cd coremain ; go test -v -race ./... 
) ( cd test ; go test -v -race ./... ) ( cd plugin ; go test -v -race ./... ) + diff --git a/Makefile.fuzz b/Makefile.fuzz index 666f4c93d89..5f4c1be2cde 100644 --- a/Makefile.fuzz +++ b/Makefile.fuzz @@ -1,5 +1,7 @@ # Makefile for fuzzing # +# With https://app.fuzzit.dev/ we are continuously fuzzing CoreDNS. +# # Use go-fuzz and needs the tools installed. For each fuzz.go in a plugin's directory # you can start the fuzzing with: make -f Makefile.fuzz # e.g. @@ -7,13 +9,16 @@ # make -f Makefile.fuzz forward # # Each plugin that wants to join the fuzzing fray only needs to add a fuzz.go that calls -# the plugins's ServeDNS and used the plugin/pkg/fuzz for the Do function. +# the plugin's ServeDNS and used the plugin/pkg/fuzz for the Do function. +# +# Installing go-fuzz is very tricky because it does not support Go modules, see the `Makefile` +# for the current trickery. The following may do the trick: # -# Installing go-fuzz -#$ go get github.com/dvyukov/go-fuzz/go-fuzz -#$ go get github.com/dvyukov/go-fuzz/go-fuzz-build +# GO111MODULE=off go get github.com/dvyukov/go-fuzz/go-fuzz-build REPO:="github.com/coredns/coredns" +FUZZIT:=v2.4.35 +# set LIBFUZZER=YES to build libfuzzer compatible targets FUZZ:=$(dir $(wildcard plugin/*/fuzz.go)) # plugin/cache/ PLUGINS:=$(foreach f,$(FUZZ),$(subst plugin, ,$(f:/=))) # > /cache @@ -21,18 +26,32 @@ PLUGINS:=$(foreach f,$(PLUGINS),$(subst /, ,$(f))) # > cache .PHONY: echo echo: - @echo fuzz targets: $(PLUGINS) + @echo $(PLUGINS) corefile + +all: $(PLUGINS) corefile .PHONY: $(PLUGINS) $(PLUGINS): echo - go-fuzz-build -tags fuzz $(REPO)/plugin/$(@) +ifeq ($(LIBFUZZER), YES) + go-fuzz-build -libfuzzer -o $(@).a ./plugin/$(@) + clang -fsanitize=fuzzer $(@).a -o $(@) +else + go-fuzz-build $(REPO)/plugin/$(@) go-fuzz -bin=./$(@)-fuzz.zip -workdir=fuzz/$(@) +endif .PHONY: corefile corefile: - go-fuzz-build -tags fuzz $(REPO)/test +ifeq ($(LIBFUZZER), YES) + go-fuzz-build -libfuzzer -o $(@).a ./test + clang -fsanitize=fuzzer $(@).a -o $(@) +else + go-fuzz-build $(REPO)/test go-fuzz -bin=./test-fuzz.zip -workdir=fuzz/$(@) +endif +fuzzit: + wget --quiet -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/$(FUZZIT)/fuzzit_Linux_x86_64 && chmod +x fuzzit .PHONY: clean clean: diff --git a/README.md b/README.md index d881401c475..72f2ef2c944 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/coredns/coredns) [![Build Status](https://img.shields.io/travis/coredns/coredns/master.svg?label=build)](https://travis-ci.org/coredns/coredns) +[![fuzzit](https://app.fuzzit.dev/badge?org_id=coredns&branch=master)](https://fuzzit.dev) [![Code Coverage](https://img.shields.io/codecov/c/github/coredns/coredns/master.svg)](https://codecov.io/github/coredns/coredns?branch=master) [![Docker Pulls](https://img.shields.io/docker/pulls/coredns/coredns.svg)](https://hub.docker.com/r/coredns/coredns) [![Go Report Card](https://goreportcard.com/badge/github.com/coredns/coredns)](https://goreportcard.com/report/coredns/coredns) @@ -18,7 +19,7 @@ provided out of the box you can add it by [writing a plugin](https://coredns.io/ CoreDNS can listen for DNS requests coming in over UDP/TCP (go'old DNS), TLS ([RFC 7858](https://tools.ietf.org/html/rfc7858)), also called DoT, DNS over HTTP/2 - DoH - -([RFC 8484](https://tools.ietf.org/html/rfc7858)) and [gRPC](https://grpc.io) (not a standard). 
+([RFC 8484](https://tools.ietf.org/html/rfc8484)) and [gRPC](https://grpc.io) (not a standard). Currently CoreDNS is able to: @@ -29,15 +30,17 @@ Currently CoreDNS is able to: * Allow for zone transfers, i.e., act as a primary server (*file*). * Automatically load zone files from disk (*auto*). * Caching of DNS responses (*cache*). -* Use etcd as a backend (replace [SkyDNS](https://github.com/skynetservices/skydns)) (*etcd*). +* Use etcd as a backend (replacing [SkyDNS](https://github.com/skynetservices/skydns)) (*etcd*). * Use k8s (kubernetes) as a backend (*kubernetes*). * Serve as a proxy to forward queries to some other (recursive) nameserver (*forward*). * Provide metrics (by using Prometheus) (*metrics*). * Provide query (*log*) and error (*errors*) logging. +* Integrate with cloud providers (*route53*). * Support the CH class: `version.bind` and friends (*chaos*). * Support the RFC 5001 DNS name server identifier (NSID) option (*nsid*). * Profiling support (*pprof*). * Rewrite queries (qtype, qclass and qname) (*rewrite* and *template*). +* Block ANY queries (*any*). And more. Each of the plugins is documented. See [coredns.io/plugins](https://coredns.io/plugins) for all in-tree plugins, and [coredns.io/explugins](https://coredns.io/explugins) for all @@ -45,11 +48,13 @@ out-of-tree plugins. ## Compilation from Source -To compile CoreDNS, we assume you have a working Go setup. See various tutorials if you don’t have that already configured. +To compile CoreDNS, we assume you have a working Go setup. See various tutorials if you don’t have +that already configured. First, make sure your golang version is 1.12 or higher as `go mod` support is needed. See [here](https://github.com/golang/go/wiki/Modules) for `go mod` details. Then, check out the project and run `make` to compile the binary: + ~~~ $ git clone https://github.com/coredns/coredns $ cd coredns @@ -60,12 +65,11 @@ This should yield a `coredns` binary. ## Compilation with Docker -CoreDNS requires Go to compile. However, if you already have docker installed and prefer not to setup -a Go environment, you could build CoreDNS easily: +CoreDNS requires Go to compile. However, if you already have docker installed and prefer not to +setup a Go environment, you could build CoreDNS easily: ``` -$ docker run --rm -i -t -v $PWD:/go/src/github.com/coredns/coredns \ - -w /go/src/github.com/coredns/coredns golang:1.12 make +$ docker run --rm -i -t -v $PWD:/v -w /v golang:1.12 make ``` The above command alone will have `coredns` binary generated. @@ -78,15 +82,18 @@ When starting CoreDNS without any configuration, it loads the ~~~ txt .:53 -2016/09/18 09:20:50 [INFO] CoreDNS-001 -CoreDNS-001 + ______ ____ _ _______ + / ____/___ ________ / __ \/ | / / ___/ ~ CoreDNS-1.6.3 + / / / __ \/ ___/ _ \/ / / / |/ /\__ \ ~ linux/amd64, go1.13, +/ /___/ /_/ / / / __/ /_/ / /| /___/ / +\____/\____/_/ \___/_____/_/ |_//____/ ~~~ Any query sent to port 53 should return some information; your sending address, port and protocol used. -If you have a Corefile without a port number specified it will, by default, use port 53, but you -can override the port with the `-dns.port` flag: +If you have a Corefile without a port number specified it will, by default, use port 53, but you can +override the port with the `-dns.port` flag: `./coredns -dns.port 1053`, runs the server on port 1053. @@ -101,8 +108,8 @@ Start a simple proxy. You'll need to be root to start listening on port 53. 
} ~~~ -Just start CoreDNS: `./coredns`. Then just query on that port (53). The query should be forwarded to -8.8.8.8 and the response will be returned. Each query should also show up in the log which is +Just start CoreDNS: `./coredns`. Then just query on that port (53). The query should be forwarded +to 8.8.8.8 and the response will be returned. Each query should also show up in the log which is printed on standard output. Serve the (NSEC) DNSSEC-signed `example.org` on port 1053, with errors and logging sent to standard @@ -120,21 +127,24 @@ example.org:1053 { } ~~~ -Serve `example.org` on port 1053, but forward everything that does *not* match `example.org` to a recursive -nameserver *and* rewrite ANY queries to HINFO. +Serve `example.org` on port 1053, but forward everything that does *not* match `example.org` to a +recursive nameserver *and* rewrite ANY queries to HINFO. ~~~ txt -.:1053 { - rewrite ANY HINFO - forward . 8.8.8.8:53 - - file /var/lib/coredns/example.org.signed example.org { +example.org:1053 { + file /var/lib/coredns/example.org.signed { transfer to * transfer to 2001:500:8f::53 } errors log } +. { + any + forward . 8.8.8.8:53 + errors + log +} ~~~ IP addresses are also allowed. They are automatically converted to reverse zones: @@ -152,7 +162,7 @@ add the closing dot: `10.0.0.0/24.` as this also stops the conversion. This even works for CIDR (See RFC 1518 and 1519) addressing, i.e. `10.0.0.0/25`, CoreDNS will then check if the `in-addr` request falls in the correct range. -Listening on TLS and for gRPC? Use: +Listening on TLS (DoT) and for gRPC? Use: ~~~ corefile tls://example.org grpc://example.org { @@ -160,6 +170,14 @@ tls://example.org grpc://example.org { } ~~~ +And for DNS over HTTP/2 (DoH) use: + +~~~ corefile +https://example.org { + whoami +} +~~~ + Specifying ports works in the same way: ~~~ txt @@ -184,6 +202,11 @@ More resources can be found: - Twitter: [@corednsio](https://twitter.com/corednsio) - Mailing list/group: (not very active) +## Contribution guidelines + +If you want to contribute to CoreDNS, be sure to review the [contribution +guidelines](CONTRIBUTING.md). + ## Deployment Examples for deployment via systemd and other use cases can be found in the [deployment @@ -206,8 +229,8 @@ And finally 1.4.1 that removes the config workarounds. ## Security ### Security Audit - -A third party security audit was performed by Cure53, you can see the full report [here](https://coredns.io/assets/DNS-01-report.pdf). +A third party security audit was performed by Cure53, you can see the full report +[here](https://coredns.io/assets/DNS-01-report.pdf). ### Reporting security vulnerabilities @@ -215,4 +238,5 @@ If you find a security vulnerability or any security related issues, please DO N issue, instead send your report privately to `security@coredns.io`. Security reports are greatly appreciated and we will publicly thank you for it. 
-Please consult [security vulnerability disclosures and security fix and release process document](https://github.com/coredns/coredns/blob/master/SECURITY-RELEASE-PROCESS.md) +Please consult [security vulnerability disclosures and security fix and release process +document](https://github.com/coredns/coredns/blob/master/SECURITY.md) diff --git a/SECURITY.md b/SECURITY.md new file mode 120000 index 00000000000..22125d94b98 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1 @@ +.github/SECURITY.md \ No newline at end of file diff --git a/core/dnsserver/listen_go111.go b/core/dnsserver/listen_go111.go deleted file mode 100644 index 573988b3323..00000000000 --- a/core/dnsserver/listen_go111.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.11 -// +build aix darwin dragonfly freebsd linux netbsd openbsd - -package dnsserver - -import ( - "context" - "net" - "syscall" - - "github.com/coredns/coredns/plugin/pkg/log" - - "golang.org/x/sys/unix" -) - -func reuseportControl(network, address string, c syscall.RawConn) error { - c.Control(func(fd uintptr) { - if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil { - log.Warningf("Failed to set SO_REUSEPORT on socket: %s", err) - } - }) - return nil -} - -func listen(network, addr string) (net.Listener, error) { - lc := net.ListenConfig{Control: reuseportControl} - return lc.Listen(context.Background(), network, addr) -} - -func listenPacket(network, addr string) (net.PacketConn, error) { - lc := net.ListenConfig{Control: reuseportControl} - return lc.ListenPacket(context.Background(), network, addr) -} diff --git a/core/dnsserver/listen_go_not111.go b/core/dnsserver/listen_go_not111.go deleted file mode 100644 index 11021d09991..00000000000 --- a/core/dnsserver/listen_go_not111.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd - -package dnsserver - -import "net" - -func listen(network, addr string) (net.Listener, error) { return net.Listen(network, addr) } - -func listenPacket(network, addr string) (net.PacketConn, error) { - return net.ListenPacket(network, addr) -} diff --git a/core/dnsserver/register.go b/core/dnsserver/register.go index 4b2cd95fc8f..27144385700 100644 --- a/core/dnsserver/register.go +++ b/core/dnsserver/register.go @@ -28,7 +28,7 @@ func init() { DefaultInput: func() caddy.Input { return caddy.CaddyfileInput{ Filepath: "Corefile", - Contents: []byte(".:" + Port + " {\nwhoami\n}\n"), + Contents: []byte(".:" + Port + " {\nwhoami\nlog\n}\n"), ServerTypeName: serverType, } }, diff --git a/core/dnsserver/server.go b/core/dnsserver/server.go index 55895430414..9b8fb23e4b2 100644 --- a/core/dnsserver/server.go +++ b/core/dnsserver/server.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "runtime" + "strings" "sync" "time" @@ -14,6 +15,7 @@ import ( "github.com/coredns/coredns/plugin/pkg/edns" "github.com/coredns/coredns/plugin/pkg/log" "github.com/coredns/coredns/plugin/pkg/rcode" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/coredns/coredns/plugin/pkg/trace" "github.com/coredns/coredns/plugin/pkg/transport" "github.com/coredns/coredns/request" @@ -63,6 +65,10 @@ func NewServer(addr string, group []*Config) (*Server, error) { if site.Debug { s.debug = true log.D.Set() + } else { + // When reloading we need to explicitly disable debug logging if it is now disabled. 
+ s.debug = false + log.D.Clear() } // set the config per zone s.zones[site.Zone] = site @@ -121,7 +127,7 @@ func (s *Server) ServePacket(p net.PacketConn) error { // Listen implements caddy.TCPServer interface. func (s *Server) Listen() (net.Listener, error) { - l, err := listen("tcp", s.Addr[len(transport.DNS+"://"):]) + l, err := reuseport.Listen("tcp", s.Addr[len(transport.DNS+"://"):]) if err != nil { return nil, err } @@ -135,7 +141,7 @@ func (s *Server) WrapListener(ln net.Listener) net.Listener { // ListenPacket implements caddy.UDPServer interface. func (s *Server) ListenPacket() (net.PacketConn, error) { - p, err := listenPacket("udp", s.Addr[len(transport.DNS+"://"):]) + p, err := reuseport.ListenPacket("udp", s.Addr[len(transport.DNS+"://"):]) if err != nil { return nil, err } @@ -200,6 +206,7 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) // In case the user doesn't enable error plugin, we still // need to make sure that we stay alive up here if rec := recover(); rec != nil { + log.Errorf("Recovered from panic in server: %q", s.Addr) vars.Panic.Inc() errorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure) } @@ -216,27 +223,18 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) return } - q := r.Question[0].Name - b := make([]byte, len(q)) - var off int - var end bool - - var dshandler *Config - // Wrap the response writer in a ScrubWriter so we automatically make the reply fit in the client's buffer. w = request.NewScrubWriter(r, w) - for { - l := len(q[off:]) - for i := 0; i < l; i++ { - b[i] = q[off+i] - // normalize the name for the lookup - if b[i] >= 'A' && b[i] <= 'Z' { - b[i] |= ('a' - 'A') - } - } + q := strings.ToLower(r.Question[0].Name) + var ( + off int + end bool + dshandler *Config + ) - if h, ok := s.zones[string(b[:l])]; ok { + for { + if h, ok := s.zones[q[off:]]; ok { if r.Question[0].Qtype != dns.TypeDS { if h.FilterFunc == nil { rcode, _ := h.pluginChain.ServeDNS(ctx, w, r) @@ -258,7 +256,7 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) // The type is DS, keep the handler, but keep on searching as maybe we are serving // the parent as well and the DS should be routed to it - this will probably *misroute* DS // queries to a possibly grand parent, but there is no way for us to know at this point - // if there is an actually delegation from grandparent -> parent -> zone. + // if there is an actual delegation from grandparent -> parent -> zone. // In all fairness: direct DS queries should not be needed. dshandler = h } @@ -301,7 +299,6 @@ func (s *Server) OnStartupComplete() { if out != "" { fmt.Print(out) } - return } // Tracer returns the tracer in the server if defined. diff --git a/core/dnsserver/server_grpc.go b/core/dnsserver/server_grpc.go index 599f5c19759..7b530f97a90 100644 --- a/core/dnsserver/server_grpc.go +++ b/core/dnsserver/server_grpc.go @@ -8,6 +8,7 @@ import ( "net" "github.com/coredns/coredns/pb" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/coredns/coredns/plugin/pkg/transport" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" @@ -72,7 +73,7 @@ func (s *ServergRPC) ServePacket(p net.PacketConn) error { return nil } // Listen implements caddy.TCPServer interface. 
func (s *ServergRPC) Listen() (net.Listener, error) { - l, err := net.Listen("tcp", s.Addr[len(transport.GRPC+"://"):]) + l, err := reuseport.Listen("tcp", s.Addr[len(transport.GRPC+"://"):]) if err != nil { return nil, err } @@ -93,7 +94,6 @@ func (s *ServergRPC) OnStartupComplete() { if out != "" { fmt.Print(out) } - return } // Stop stops the server. It blocks until the server is @@ -165,8 +165,8 @@ func (r *gRPCresponse) Write(b []byte) (int, error) { // These methods implement the dns.ResponseWriter interface from Go DNS. func (r *gRPCresponse) Close() error { return nil } func (r *gRPCresponse) TsigStatus() error { return nil } -func (r *gRPCresponse) TsigTimersOnly(b bool) { return } -func (r *gRPCresponse) Hijack() { return } +func (r *gRPCresponse) TsigTimersOnly(b bool) {} +func (r *gRPCresponse) Hijack() {} func (r *gRPCresponse) LocalAddr() net.Addr { return r.localAddr } func (r *gRPCresponse) RemoteAddr() net.Addr { return r.remoteAddr } func (r *gRPCresponse) WriteMsg(m *dns.Msg) error { r.Msg = m; return nil } diff --git a/core/dnsserver/server_https.go b/core/dnsserver/server_https.go index 93d62fa84ce..d2515295c69 100644 --- a/core/dnsserver/server_https.go +++ b/core/dnsserver/server_https.go @@ -12,6 +12,7 @@ import ( "github.com/coredns/coredns/plugin/pkg/dnsutil" "github.com/coredns/coredns/plugin/pkg/doh" "github.com/coredns/coredns/plugin/pkg/response" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/coredns/coredns/plugin/pkg/transport" ) @@ -61,7 +62,7 @@ func (s *ServerHTTPS) ServePacket(p net.PacketConn) error { return nil } // Listen implements caddy.TCPServer interface. func (s *ServerHTTPS) Listen() (net.Listener, error) { - l, err := net.Listen("tcp", s.Addr[len(transport.HTTPS+"://"):]) + l, err := reuseport.Listen("tcp", s.Addr[len(transport.HTTPS+"://"):]) if err != nil { return nil, err } @@ -82,7 +83,6 @@ func (s *ServerHTTPS) OnStartupComplete() { if out != "" { fmt.Print(out) } - return } // Stop stops the server. It blocks until the server is totally stopped. diff --git a/core/dnsserver/server_tls.go b/core/dnsserver/server_tls.go index 95c2d691280..0b7fa517a2d 100644 --- a/core/dnsserver/server_tls.go +++ b/core/dnsserver/server_tls.go @@ -6,6 +6,7 @@ import ( "fmt" "net" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/coredns/coredns/plugin/pkg/transport" "github.com/miekg/dns" @@ -57,7 +58,7 @@ func (s *ServerTLS) ServePacket(p net.PacketConn) error { return nil } // Listen implements caddy.TCPServer interface. 
func (s *ServerTLS) Listen() (net.Listener, error) { - l, err := net.Listen("tcp", s.Addr[len(transport.TLS+"://"):]) + l, err := reuseport.Listen("tcp", s.Addr[len(transport.TLS+"://"):]) if err != nil { return nil, err } @@ -78,5 +79,4 @@ func (s *ServerTLS) OnStartupComplete() { if out != "" { fmt.Print(out) } - return } diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go index 9863b360757..d724696486c 100644 --- a/core/dnsserver/zdirectives.go +++ b/core/dnsserver/zdirectives.go @@ -15,6 +15,7 @@ var Directives = []string{ "tls", "reload", "nsid", + "bufsize", "root", "bind", "debug", @@ -26,6 +27,7 @@ var Directives = []string{ "errors", "log", "dnstap", + "acl", "any", "chaos", "loadbalance", @@ -35,8 +37,11 @@ var Directives = []string{ "dnssec", "autopath", "template", + "transfer", "hosts", "route53", + "azure", + "clouddns", "federation", "k8s_external", "kubernetes", @@ -50,4 +55,5 @@ var Directives = []string{ "erratic", "whoami", "on", + "sign", } diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go index a6fae1a797a..c7be342edc4 100644 --- a/core/plugin/zplugin.go +++ b/core/plugin/zplugin.go @@ -5,20 +5,23 @@ package plugin import ( // Include all plugins. _ "github.com/caddyserver/caddy/onevent" + _ "github.com/coredns/coredns/plugin/acl" _ "github.com/coredns/coredns/plugin/any" _ "github.com/coredns/coredns/plugin/auto" _ "github.com/coredns/coredns/plugin/autopath" + _ "github.com/coredns/coredns/plugin/azure" _ "github.com/coredns/coredns/plugin/bind" + _ "github.com/coredns/coredns/plugin/bufsize" _ "github.com/coredns/coredns/plugin/cache" _ "github.com/coredns/coredns/plugin/cancel" _ "github.com/coredns/coredns/plugin/chaos" + _ "github.com/coredns/coredns/plugin/clouddns" _ "github.com/coredns/coredns/plugin/debug" _ "github.com/coredns/coredns/plugin/dnssec" _ "github.com/coredns/coredns/plugin/dnstap" _ "github.com/coredns/coredns/plugin/erratic" _ "github.com/coredns/coredns/plugin/errors" _ "github.com/coredns/coredns/plugin/etcd" - _ "github.com/coredns/coredns/plugin/federation" _ "github.com/coredns/coredns/plugin/file" _ "github.com/coredns/coredns/plugin/forward" _ "github.com/coredns/coredns/plugin/grpc" @@ -39,9 +42,12 @@ import ( _ "github.com/coredns/coredns/plugin/root" _ "github.com/coredns/coredns/plugin/route53" _ "github.com/coredns/coredns/plugin/secondary" + _ "github.com/coredns/coredns/plugin/sign" _ "github.com/coredns/coredns/plugin/template" _ "github.com/coredns/coredns/plugin/tls" _ "github.com/coredns/coredns/plugin/trace" + _ "github.com/coredns/coredns/plugin/transfer" _ "github.com/coredns/coredns/plugin/whoami" + _ "github.com/coredns/federation" _ "github.com/openshift/coredns-mdns" ) diff --git a/coredns.1.md b/coredns.1.md index 6fe61e38887..cb56bc7081b 100644 --- a/coredns.1.md +++ b/coredns.1.md @@ -1,6 +1,6 @@ ## CoreDNS -*coredns* - plugable DNS nameserver optimized for service discovery and flexibility. +*coredns* - pluggable DNS nameserver optimized for service discovery and flexibility. ## Synopsis @@ -24,9 +24,6 @@ Available options: : specify Corefile to load, if not given CoreDNS will look for a `Corefile` in the current directory. -**-cpu** **CAP** -: specify maximum CPU capacity in percent. 
- **-dns.port** **PORT** : override default port (53) to listen on. diff --git a/coremain/run.go b/coremain/run.go index e6f51295fdf..98ba2fd6c57 100644 --- a/coremain/run.go +++ b/coremain/run.go @@ -2,18 +2,15 @@ package coremain import ( - "errors" "flag" "fmt" "io/ioutil" "log" "os" "runtime" - "strconv" "strings" "github.com/coredns/coredns/core/dnsserver" - clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/caddyserver/caddy" ) @@ -24,7 +21,6 @@ func init() { setVersion() flag.StringVar(&conf, "conf", "", "Corefile to load (default \""+caddy.DefaultConfigFile+"\")") - flag.StringVar(&cpu, "cpu", "100%", "CPU cap") flag.BoolVar(&plugins, "plugins", false, "List installed plugins") flag.StringVar(&caddy.PidFile, "pidfile", "", "Path to write pid file") flag.BoolVar(&version, "version", false, "Show version") @@ -73,11 +69,6 @@ func Run() { os.Exit(0) } - // Set CPU cap - if err := setCPU(cpu); err != nil { - mustLogFatal(err) - } - // Get Corefile input corefile, err := caddy.LoadCaddyfile(serverType) if err != nil { @@ -90,7 +81,6 @@ func Run() { mustLogFatal(err) } - logVersion() if !dnsserver.Quiet { showVersion() } @@ -149,12 +139,6 @@ func defaultLoader(serverType string) (caddy.Input, error) { }, nil } -// logVersion logs the version that is starting. -func logVersion() { - clog.Info(versionString()) - clog.Info(releaseString()) -} - // showVersion prints the version that is starting. func showVersion() { fmt.Print(versionString()) @@ -186,54 +170,16 @@ func setVersion() { // Only set the appVersion if -ldflags was used if gitNearestTag != "" || gitTag != "" { if devBuild && gitNearestTag != "" { - appVersion = fmt.Sprintf("%s (+%s %s)", - strings.TrimPrefix(gitNearestTag, "v"), GitCommit, buildDate) + appVersion = fmt.Sprintf("%s (+%s %s)", strings.TrimPrefix(gitNearestTag, "v"), GitCommit, buildDate) } else if gitTag != "" { appVersion = strings.TrimPrefix(gitTag, "v") } } } -// setCPU parses string cpu and sets GOMAXPROCS -// according to its value. It accepts either -// a number (e.g. 3) or a percent (e.g. 50%). 
-func setCPU(cpu string) error { - var numCPU int - - availCPU := runtime.NumCPU() - - if strings.HasSuffix(cpu, "%") { - // Percent - var percent float32 - pctStr := cpu[:len(cpu)-1] - pctInt, err := strconv.Atoi(pctStr) - if err != nil || pctInt < 1 || pctInt > 100 { - return errors.New("invalid CPU value: percentage must be between 1-100") - } - percent = float32(pctInt) / 100 - numCPU = int(float32(availCPU) * percent) - } else { - // Number - num, err := strconv.Atoi(cpu) - if err != nil || num < 1 { - return errors.New("invalid CPU value: provide a number or percent greater than 0") - } - numCPU = num - } - - if numCPU > availCPU { - numCPU = availCPU - } - - runtime.GOMAXPROCS(numCPU) - return nil -} - // Flags that control program flow or startup var ( conf string - cpu string - logfile bool version bool plugins bool ) diff --git a/coremain/run_test.go b/coremain/run_test.go deleted file mode 100644 index da01637d8d4..00000000000 --- a/coremain/run_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package coremain - -import ( - "runtime" - "testing" -) - -func TestSetCPU(t *testing.T) { - currentCPU := runtime.GOMAXPROCS(-1) - maxCPU := runtime.NumCPU() - halfCPU := int(0.5 * float32(maxCPU)) - if halfCPU < 1 { - halfCPU = 1 - } - for i, test := range []struct { - input string - output int - shouldErr bool - }{ - {"1", 1, false}, - {"-1", currentCPU, true}, - {"0", currentCPU, true}, - {"100%", maxCPU, false}, - {"50%", halfCPU, false}, - {"110%", currentCPU, true}, - {"-10%", currentCPU, true}, - {"invalid input", currentCPU, true}, - {"invalid input%", currentCPU, true}, - {"9999", maxCPU, false}, // over available CPU - } { - err := setCPU(test.input) - if test.shouldErr && err == nil { - t.Errorf("Test %d: Expected error, but there wasn't any", i) - } - if !test.shouldErr && err != nil { - t.Errorf("Test %d: Expected no error, but there was one: %v", i, err) - } - if actual, expected := runtime.GOMAXPROCS(-1), test.output; actual != expected { - t.Errorf("Test %d: GOMAXPROCS was %d but expected %d", i, actual, expected) - } - // teardown - runtime.GOMAXPROCS(currentCPU) - } -} diff --git a/coremain/version.go b/coremain/version.go index 9c622c58b20..a0a323921b8 100644 --- a/coremain/version.go +++ b/coremain/version.go @@ -2,7 +2,7 @@ package coremain // Various CoreDNS constants. 
const ( - CoreVersion = "1.5.2" + CoreVersion = "1.6.6" coreName = "CoreDNS" serverType = "dns" ) diff --git a/go.mod b/go.mod index a7e4e59004d..05378cbc9c3 100644 --- a/go.mod +++ b/go.mod @@ -3,35 +3,39 @@ module github.com/coredns/coredns go 1.12 require ( - github.com/Shopify/sarama v1.23.0 // indirect - github.com/aws/aws-sdk-go v1.20.5 - github.com/caddyserver/caddy v1.0.1 - github.com/coreos/etcd v3.3.13+incompatible + github.com/Azure/azure-sdk-for-go v32.6.0+incompatible + github.com/Azure/go-autorest/autorest v0.9.3 + github.com/Azure/go-autorest/autorest/azure/auth v0.4.1 + github.com/aws/aws-sdk-go v1.25.48 + github.com/caddyserver/caddy v1.0.4 + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 - github.com/evanphx/json-patch v4.5.0+incompatible // indirect github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 - github.com/golang/protobuf v1.3.1 - github.com/google/gofuzz v1.0.0 // indirect - github.com/googleapis/gnostic v0.3.0 // indirect - github.com/gophercloud/gophercloud v0.2.0 // indirect + github.com/golang/protobuf v1.3.2 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 + github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/miekg/dns v1.1.14 - github.com/openshift/coredns-mdns v0.0.0-20190710141149-66c708637267 + github.com/miekg/dns v1.1.25 + github.com/openshift/coredns-mdns v0.0.0-20200122115902-259b209eea6a github.com/opentracing/opentracing-go v1.1.0 - github.com/openzipkin/zipkin-go-opentracing v0.3.5 - github.com/prometheus/client_golang v1.0.0 - github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 - github.com/prometheus/common v0.6.0 - golang.org/x/sys v0.0.0-20190618155005-516e3c20635f - google.golang.org/grpc v1.21.1 - gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 - k8s.io/api v0.0.0-20190313235455-40a48860b5ab - k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 - k8s.io/client-go v11.0.0+incompatible - k8s.io/klog v0.3.3 - k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058 // indirect - k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a // indirect + github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 + github.com/prometheus/client_golang v1.2.1 + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 + github.com/prometheus/common v0.7.0 + go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a + golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 + golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 + google.golang.org/api v0.14.0 + google.golang.org/grpc v1.25.1 + gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 + k8s.io/api v0.0.0-20190620084959-7cf5895f2711 + k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 + k8s.io/client-go v0.0.0-20190620085101-78d2af792bab + k8s.io/klog v0.4.0 ) -replace github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.14 +replace ( + github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.0+incompatible + github.com/miekg/dns v1.1.3 => github.com/miekg/dns 
v1.1.22 +) diff --git a/go.sum b/go.sum index 2b011b6a49e..01c785e7700 100644 --- a/go.sum +++ b/go.sum @@ -1,59 +1,124 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.40.0 h1:FjSY7bOj+WzJe6TZRVtXI2b9kAYvtNg4lMbcH2+MUkk= -cloud.google.com/go v0.40.0/go.mod h1:Tk58MuI9rbLMKlAjeO/bDnteAx7tX2gJIXw4T5Jwlro= +cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= +cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/azure-sdk-for-go v32.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v32.6.0+incompatible h1:PgaVceWF5idtJajyt1rzq1cep6eRPJ8+8hs4GnNzTo0= +github.com/Azure/azure-sdk-for-go v32.6.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezqVhr2h6t9LJIEKVO0= +github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= +github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.1 h1:VDSqmaEc8ECZdfavoa1KmVpIVTGTc+v/2jvHGmCYvSE= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.1/go.mod h1:5TgH20II424SXIV9YDBsO4rBCKsh39Vbx9DvhJZZ8rU= +github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= 
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.2.0 h1:nQOZzFCudTh+TvquAtCRjM01VEYx85e9qbwt5ncW4L8= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg= -github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.21.0 h1:0GKs+e8mn1RRUzfg9oUXv3v7ZieQLmOZF/bfnmmGhM8= github.com/Shopify/sarama v1.21.0/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoLjkm5tCQ= -github.com/Shopify/sarama v1.23.0 h1:slvlbm7bxyp7sKQbUwha5BQdZTqurhRoI+zbKorVigQ= -github.com/Shopify/sarama v1.23.0/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/akamai/AkamaiOPEN-edgegrid-golang v0.9.0/go.mod 
h1:zpDJeKyp9ScW4NNrbdr+Eyxvry3ilGPewKoXw3XGN1k= +github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190808125512-07798873deee/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aws/aws-sdk-go v1.20.5 h1:Ytq5AxpA2pr4vRJM9onvgAjjVRZKKO63WStbG/jLHw0= -github.com/aws/aws-sdk-go v1.20.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= +github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/caddyserver/caddy v1.0.1 h1:oor6ep+8NoJOabpFXhvjqjfeldtw1XSzfISVrbfqTKo= -github.com/caddyserver/caddy v1.0.1/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/caddyserver/caddy v1.0.4 h1:wwuGSkUHo6RZ3oMpeTt7J09WBB87X5o+IZN4dKehcQE= +github.com/caddyserver/caddy v1.0.4/go.mod h1:uruyfVsyMcDb3IOzSKsi1x0wOjy1my/PxOSTcD+24jM= github.com/celebdor/zeroconf v0.0.0-20190404095836-c328d57fca11 h1:VthumoLRWujBLOoUGiCRF1Uyks6lZBHWoxItmkSajqA= github.com/celebdor/zeroconf v0.0.0-20190404095836-c328d57fca11/go.mod h1:u+uV1CvldRx3gLkxSL/4KfFGFRmdPWwFJ7lDwdFkCe4= -github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= 
-github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coredns/coredns v1.5.2/go.mod h1:0BUJT/Mo6Um12LllyolnPp15yjbGuog8z+zVrQP8Z4M= -github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/cloudflare/cloudflare-go v0.10.2/go.mod h1:qhVI5MKwBGhdNU89ZRz2plgYutcJ5PCekLxXn56w6SY= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coredns/coredns v1.6.6/go.mod h1:Bdcnka9HmKGYj12ZIDF3lpQSfDHSsMc85Wj9xEyZUts= +github.com/coredns/federation v0.0.0-20190818181423-e032b096babe h1:ND08lR/TclI9W4dScCwdRESOacCCdF3FkuB5pBIOv1U= +github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 h1:FE783w8WFh+Rvg+7bZ5g8p7gP4SeVS4AoNwkvazlsBg= github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/cpu/goacmedns v0.0.1/go.mod h1:sesf/pNnCYwUevQEQfEwY0Y3DydlQWSGZbaMElOWxok= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decker502/dnspod-go v0.2.0/go.mod h1:qsurYu1FgxcDwfSwXJdLt4kRsBLZeosEb9uq4Sy+08g= +github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnsimple/dnsimple-go v0.30.0/go.mod h1:O5TJ0/U6r7AfT8niYNlmohpLbCSG+c71tQlGr9SeGrg= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -61,109 +126,131 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible 
h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE= github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 h1:QdyRyGZWLEvJG5Kw3VcVJvhXJ5tZ1MkRgqpJOEZSySM= github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-acme/lego v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZqHKVnK8f0s= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= +github.com/go-acme/lego/v3 v3.1.0/go.mod h1:074uqt+JS6plx+c9Xaiz6+L+GBb+7itGtzfcDM2AhEE= +github.com/go-acme/lego/v3 v3.2.0/go.mod h1:074uqt+JS6plx+c9Xaiz6+L+GBb+7itGtzfcDM2AhEE= +github.com/go-cmd/cmd v1.0.5/go.mod h1:y8q8qlK5wQibcw63djSl/ntiHUHXHGdCkPk0j4QeW4s= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-ini/ini v1.44.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/go-querystring 
v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= -github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd h1:tkA3C/XTk8iACLOlTez37pL+0iGSYkkRGKdXgJ6ZylM= -github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.2.0 h1:lD2Bce2xBAMNNcFZ0dObTpXkGLlVIb33RPVUNVpw6ic= -github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gophercloud/gophercloud v0.3.0 h1:6sjpKIpVwRIIwmcEGp+WwNovNsem+c+2vm6oxshRpL8= +github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 
h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.8.3 h1:wZ6biDjnBgIOf5t+r8dYsoGWH1Zl2Ps32OxD80Odbk8= -github.com/grpc-ecosystem/grpc-gateway v1.8.3/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df/go.mod h1:QMZY7/J/KSQEhKWFeDesPjMj+wCHReeknARU3wqlyN4= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 h1:d3VSuNcgTCn21dNMm8g412Fck/XWFmMj4nJhhHT7ZZ0= +github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062/go.mod h1:PcNJqIlcX/dj3DTG/+QQnRvSgTMG6CLpRMjWcv4+J6w= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -175,121 +262,179 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= -github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= -github.com/lucas-clemente/quic-go v0.10.2/go.mod 
h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= +github.com/labbsr0x/bindman-dns-webhook v1.0.2/go.mod h1:p6b+VCXIR8NYKpDr8/dg1HKfQoRHCdcsROXKvmoehKA= +github.com/labbsr0x/goh v1.0.1/go.mod h1:8K2UhVoaWXcCU7Lxoa2omWnC8gyW8px7/lmO61c027w= +github.com/linode/linodego v0.10.0/go.mod h1:cziNP7pbvE3mXIPneHj0oRY8L1WtGEIKlZ8LANE4eXA= +github.com/liquidweb/liquidweb-go v1.6.0/go.mod h1:UDcVnAMDkZxpw4Y7NOHkqoeiGacVLEIG/i5J9cyixzQ= +github.com/lucas-clemente/quic-go v0.13.1/go.mod h1:Vn3/Fb0/77b02SGhQk36KzOUmXgVpFfizUfW5WMaqyU= +github.com/marten-seemann/chacha20 v0.2.0/go.mod h1:HSdjFau7GzYRj+ahFNwsO3ouVJr1HFkWoEwNDb4TMtE= +github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= +github.com/marten-seemann/qtls v0.4.1/go.mod h1:pxVXcHHw1pNIt8Qo0pwSYQEoZ8yYOOPXTCZLQQunvRc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJC3+BNM6LJs1x21rivK7yxfTZMAuY2s= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v1.1.14 h1:wkQWn9wIp4mZbwW8XV6Km6owkvRPbOiV004ZM2CkGvA= -github.com/miekg/dns v1.1.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mholt/certmagic v0.8.3/go.mod h1:91uJzK5K8IWtYQqTi5R2tsxV1pCde+wdGfaRaOZi6aQ= +github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04/go.mod h1:5sN+Lt1CaY4wsPvgQH/jsuJi4XO2ssZbdsIizr4CVC8= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/nrdcg/auroradns v1.0.0/go.mod h1:6JPXKzIRzZzMqtTDgueIhTi6rFf1QvYE/HzqidhOhjw= +github.com/nrdcg/goinwx v0.6.1/go.mod h1:XPiut7enlbEdntAqalBIqcYcTEVhpv/dKWgDCX2SwKQ= +github.com/nrdcg/namesilo v0.2.1/go.mod h1:lwMvfQTyYq+BbjJd30ylEG4GPSS6PII0Tia4rRpRiyw= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openshift/coredns-mdns v0.0.0-20190710141149-66c708637267 h1:ngfwqBS4NShToPtzbTXl7leBUooNbh2GySpQBYfg6DQ= -github.com/openshift/coredns-mdns v0.0.0-20190710141149-66c708637267/go.mod h1:FWHa7LOqMyNEb6yTW5Xxtjagtz6dSJK4j8P5QeoVEYs= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openshift/coredns-mdns v0.0.0-20200122115902-259b209eea6a h1:twcIn3o9X7RkDiQpGP64zpUHESQUwAfwgQ7Mvr0beM4= 
+github.com/openshift/coredns-mdns v0.0.0-20200122115902-259b209eea6a/go.mod h1:BLITL6gPQ/ou0UMHoA3YvFdAoh2UsV/YNgA7Sx9DMz4= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 h1:82Tnq9OJpn+h5xgGpss5/mOv3KXdjtkdorFSOUusjM8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= -github.com/openzipkin/zipkin-go-opentracing v0.3.5 h1:nZPvd2EmRKP+NzFdSuxZF/FG4Y4W2gn6ugXliTAu9o0= -github.com/openzipkin/zipkin-go-opentracing v0.3.5/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014/go.mod h1:joRatxRJaZBsY3JAOEMcoOp05CnZzsx4scTxi95DHyQ= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/rainycape/memcache v0.0.0-20150622160815-1031fa0ce2f2/go.mod h1:7tZKcyumwBO6qip7RNQ5r77yrssm9bfCowcLEBcU5IA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sacloud/libsacloud v1.26.1/go.mod h1:79ZwATmHLIFZIMd7sxA3LwzVy/B77uj3LDoToVTxDoQ= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/skratchdot/open-golang 
v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/timewasted/linode v0.0.0-20160829202747-37e84520dcf7/go.mod h1:imsgLplxEC/etjIhdr3dNzV3JeT27LbVu5pYWm0JCBY= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/transip/gotransip v0.0.0-20190812104329-6d8d9179b66f/go.mod h1:i0f4R4o2HM0m3DZYQWsj6/MEowD57VzoH0v3d7igeFY= +github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vultr/govultr v0.1.4/go.mod h1:9H008Uxr/C4vFNGLqKx232C206GL0PBHzOP0809bGNA= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a h1:kw8SHTZWndtiiC6ht2gBebCOGycQHLGERawMZljmbAY= +go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443 h1:IcSOAf4PyMp3U3XbIEj1/xJ2BjNN2jWv7JoyOsMxXUU= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -297,18 +442,24 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net 
v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b h1:lkjdUzSyJ5P1+eal9fxXX9Xg2BTfswsonKUse48C0uE= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -317,111 +468,136 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180622082034-63fc586f45fe/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190618155005-516e3c20635f h1:dHNZYIYdq2QuU6w73vZ/DzesPbVlZVYZTtTZmrnsbQ8= -golang.org/x/sys v0.0.0-20190618155005-516e3c20635f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.14.0 h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 h1:wuGevabY6r+ivPNagjUXGGxF+GqgMd+dBhjsxW4q9u4= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3 h1:0LGHEA/u5XLibPOx6D7D8FBT/ax6wT57vNKY0QckCwo= -google.golang.org/genproto 
v0.0.0-20190611190212-a7e196e89fd3/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 h1:2LhklnAJsRSelbnBrrE5QuRleRDkmOh2JWxOtIX6yec= -gopkg.in/DataDog/dd-trace-go.v1 v1.15.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 h1:aFSFd6oDMdvPYiToGqTv7/ERA6QrPhGaXSuueRCaM88= +gopkg.in/DataDog/dd-trace-go.v1 v1.19.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= -gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= -gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod 
h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc/go.mod h1:VV+3haRsgDiVLxyifmMBrBIuCWFBPYKbRssXB9z67Hw= +gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190313235455-40a48860b5ab h1:DG9A67baNpoeweOy2spF1OWHhnVY5KR7/Ek/+U1lVZc= -k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 h1:IS7K02iBkQXpCeieSiyJjGoLSdVOv2DbPaWHJ+ZtgKg= -k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= -k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50= +k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= +k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 h1:uV4S5IB5g4Nvi+TBVNf3e9L4wrirlwYJ6w88jUQxTUw= +k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= +k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g= +k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= -k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= 
+k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 h1:zia7dTzfEtdiSUxi9cXUDsSQH2xE6igmGKyFn2on/9A= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058 h1:di3XCwddOR9cWBNpfgXaskhh6cgJuwcK54rvtwUaC10= -k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= +k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190529001817-6999998975a7 h1:5UOdmwfY+7XsXvo26XeCDu9GhHJPkO1z8Mcz5AHMnOE= k8s.io/utils v0.0.0-20190529001817-6999998975a7/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a h1:2jUDc9gJja832Ftp+QbDV0tVhQHMISFn01els+2ZAcw= -k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/man/coredns-acl.7 b/man/coredns-acl.7 new file mode 100644 index 00000000000..d92594171c3 --- /dev/null +++ b/man/coredns-acl.7 @@ -0,0 +1,105 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ACL" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" + +.PP +\fIacl\fP - enforces access control policies on source ip and prevents unauthorized access to DNS servers. + +.SH "DESCRIPTION" +.PP +With \fB\fCacl\fR enabled, users are able to block suspicious DNS queries by configuring IP filter rule sets, i.e. allowing authorized queries to recurse or blocking unauthorized queries. + +.PP +This plugin can be used multiple times per Server Block. + +.SH "SYNTAX" +.PP +.RS + +.nf +acl [ZONES...] { + ACTION [type QTYPE...] [net SOURCE...] +} + +.fi +.RE + +.IP \(bu 4 +\fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block are used. +.IP \(bu 4 +\fBACTION\fP (\fIallow\fP or \fIblock\fP) defines the way to deal with DNS queries matched by this rule. The default action is \fIallow\fP, which means a DNS query not matched by any rules will be allowed to recurse. +.IP \(bu 4 +\fBQTYPE\fP is the query type to match for the requests to be allowed or blocked. Common resource record types are supported. \fB\fC*\fR stands for all record types. The default behavior for an omitted \fB\fCtype QTYPE...\fR is to match all kinds of DNS queries (same as \fB\fCtype *\fR). +.IP \(bu 4 +\fBSOURCE\fP is the source IP address to match for the requests to be allowed or blocked. Typical CIDR notation and single IP address are supported. \fB\fC*\fR stands for all possible source IP addresses. + + +.SH "EXAMPLES" +.PP +To demonstrate the usage of plugin acl, here we provide some typical examples. + +.PP +Block all DNS queries with record type A from 192.168.0.0/16: + +.PP +.RS + +.nf +\&. { + acl { + block type A net 192.168.0.0/16 + } +} + +.fi +.RE + +.PP +Block all DNS queries from 192.168.0.0/16 except for 192.168.1.0/24: + +.PP +.RS + +.nf +\&. 
{ + acl { + allow net 192.168.1.0/24 + block net 192.168.0.0/16 + } +} + +.fi +.RE + +.PP +Allow only DNS queries from 192.168.0.0/16 and 192.168.1.0/24: + +.PP +.RS + +.nf +\&. { + acl { + allow net 192.168.0.0/16 192.168.1.0/24 + block + } +} + +.fi +.RE + +.PP +Block all DNS queries from 192.168.1.0/24 towards a.example.org: + +.PP +.RS + +.nf +example.org { + acl a.example.org { + block net 192.168.1.0/24 + } +} + +.fi +.RE + diff --git a/man/coredns-any.7 b/man/coredns-any.7 index 6d8f795da42..a745461bf32 100644 --- a/man/coredns-any.7 +++ b/man/coredns-any.7 @@ -1,9 +1,9 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ANY" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ANY" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIany\fP - give a minimal response to ANY queries. +\fIany\fP - gives a minimal response to ANY queries. .SH "DESCRIPTION" .PP @@ -41,7 +41,7 @@ A \fB\fCdig +nocmd ANY example.org +noall +answer\fR now returns: .RS .nf -example.org. 8482 IN HINFO "ANY obsoleted" "See RFC 8482" +example.org. 8482 IN HINFO "ANY obsoleted" "See RFC 8482" .fi .RE diff --git a/man/coredns-auto.7 b/man/coredns-auto.7 index 4dfd7758e24..c3b9250366c 100644 --- a/man/coredns-auto.7 +++ b/man/coredns-auto.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-AUTO" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-AUTO" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -77,8 +77,8 @@ notifies to 10.240.1.1 .RS .nf -\&. { - auto org { +org { + auto { directory /etc/coredns/zones/org transfer to * transfer to 10.240.1.1 diff --git a/man/coredns-autopath.7 b/man/coredns-autopath.7 index 9150dce0264..39792104782 100644 --- a/man/coredns-autopath.7 +++ b/man/coredns-autopath.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-AUTOPATH" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-AUTOPATH" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -36,7 +36,7 @@ If a plugin implements the \fB\fCAutoPather\fR interface then it can be used. .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric is exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metric is exported: .IP \(bu 4 \fB\fCcoredns_autopath_success_count_total{server}\fR - counter of successfully autopath-ed queries. @@ -56,7 +56,7 @@ autopath my\-resolv.conf .RE .PP -Use \fB\fCmy-resolv.conf\fR as the file to get the search path from. This file only needs so have one line: +Use \fB\fCmy-resolv.conf\fR as the file to get the search path from. This file only needs to have one line: \fB\fCsearch domain1 domain2 ...\fR .PP diff --git a/man/coredns-azure.7 b/man/coredns-azure.7 new file mode 100644 index 00000000000..db86a38ce53 --- /dev/null +++ b/man/coredns-azure.7 @@ -0,0 +1,70 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-AZURE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" + +.SH "NAME" +.PP +\fIazure\fP - enables serving zone data from Microsoft Azure DNS service. + +.SH "DESCRIPTION" +.PP +The azure plugin is useful for serving zones from Microsoft Azure DNS. The \fIazure\fP plugin supports +all the DNS records supported by Azure, viz. A, AAAA, CNAME, MX, NS, PTR, SOA, SRV, and TXT +record types.
+ +.SH "SYNTAX" +.PP +.RS + +.nf +azure RESOURCE\_GROUP:ZONE... { + tenant TENANT\_ID + client CLIENT\_ID + secret CLIENT\_SECRET + subscription SUBSCRIPTION\_ID + environment ENVIRONMENT + fallthrough [ZONES...] +} + +.fi +.RE + +.IP \(bu 4 +\fBRESOURCE_GROUP:ZONE\fP is the resource group to which the hosted zone belongs on Azure, +and \fBZONE\fP the zone that contains data. +.IP \(bu 4 +\fBCLIENT_ID\fP and \fBCLIENT_SECRET\fP are the credentials for Azure, and \fB\fCtenant\fR specifies the +\fBTENANT_ID\fP to be used. \fBSUBSCRIPTION_ID\fP is the subscription ID. All of these are needed +to access the data in Azure. +.IP \(bu 4 +\fB\fCenvironment\fR specifies the Azure \fBENVIRONMENT\fP. +.IP \(bu 4 +\fB\fCfallthrough\fR If zone matches and no record can be generated, pass request to the next plugin. +If \fBZONES\fP is omitted, then fallthrough happens for all zones for which the plugin is +authoritative. + + +.SH "EXAMPLES" +.PP +Enable the \fIazure\fP plugin with Azure credentials for the zone \fB\fCexample.org\fR: + +.PP +.RS + +.nf +example.org { + azure resource\_group\_foo:example.org { + tenant 123abc\-123abc\-123abc\-123abc + client 123abc\-123abc\-123abc\-234xyz + subscription 123abc\-123abc\-123abc\-563abc + secret mysecret + } +} + +.fi +.RE + +.SH "ALSO SEE" +.PP +The Azure DNS Overview +\[la]https://docs.microsoft.com/en-us/azure/dns/dns-overview\[ra]. + diff --git a/man/coredns-bind.7 b/man/coredns-bind.7 index 67fbeec5c41..2cfc34a8ca4 100644 --- a/man/coredns-bind.7 +++ b/man/coredns-bind.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-BIND" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-BIND" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -60,7 +60,7 @@ To allow processing DNS requests only local host on both IPv4 and IPv6 stacks, u .RE .PP -If the configuration comes up with several \fIbind\fP directives, all addresses are consolidated together: +If the configuration contains several \fIbind\fP plugins, all addresses are consolidated together: The following sample is equivalent to the preceding: .PP diff --git a/man/coredns-bufsize.7 b/man/coredns-bufsize.7 new file mode 100644 index 00000000000..e3e4e0e97b2 --- /dev/null +++ b/man/coredns-bufsize.7 @@ -0,0 +1,67 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-BUFSIZE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" + +.SH "NAME" +.PP +\fIbufsize\fP - sets the EDNS0 buffer size to prevent IP fragmentation. + +.SH "DESCRIPTION" +.PP +\fIbufsize\fP limits a requester's UDP payload size. +It prevents IP fragmentation, so as to mitigate certain DNS vulnerabilities. + +.SH "SYNTAX" +.PP +.RS + +.nf +bufsize [SIZE] + +.fi +.RE + +.PP +\fB[SIZE]\fP is an int value for setting the buffer size. +The default value is 512, and the value must be within 512 - 4096. +Only one argument is acceptable, and it covers both IPv4 and IPv6. + +.SH "EXAMPLES" +.PP +Enable limiting the buffer size of the outgoing query to the resolver (172.31.0.10): + +.PP +.RS + +.nf +\&. { + bufsize 512 + forward . 172.31.0.10 + log +} + +.fi +.RE + +.PP +Enable limiting the buffer size as an authoritative nameserver: + +.PP +.RS + +.nf +\&. { + bufsize 512 + file db.example.org + log +} + +.fi +.RE + +.SH "CONSIDERATIONS" +.IP \(bu 4 +Setting bufsize to 1232 bytes may avoid fragmentation on the majority of networks in use today, but it depends on the MTU of the physical network links.
+.IP \(bu 4 +For now, if a client does not use EDNS, this plugin adds an OPT RR. + + diff --git a/man/coredns-cache.7 b/man/coredns-cache.7 index 4388d8beb22..0349af0f6c1 100644 --- a/man/coredns-cache.7 +++ b/man/coredns-cache.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-CACHE" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-CACHE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -48,6 +48,7 @@ cache [TTL] [ZONES...] { success CAPACITY [TTL] [MINTTL] denial CAPACITY [TTL] [MINTTL] prefetch AMOUNT [[DURATION] [PERCENTAGE%]] + serve\_stale [DURATION] } .fi @@ -70,6 +71,11 @@ Popular means \fBAMOUNT\fP queries have been seen with no gaps of \fBDURATION\fP \fBDURATION\fP defaults to 1m. Prefetching will happen when the TTL drops below \fBPERCENTAGE\fP, which defaults to \fB\fC10%\fR, or latest 1 second before TTL expiration. Values should be in the range \fB\fC[10%, 90%]\fR. Note the percent sign is mandatory. \fBPERCENTAGE\fP is treated as an \fB\fCint\fR. +.IP \(bu 4 +\fB\fCserve_stale\fR, when set, means the cache will always serve an expired entry to a client if there is one +available. When this happens, the cache will attempt to refresh the entry after sending the expired +entry to the client. The responses have a TTL of 0. \fBDURATION\fP is how far back to consider +stale responses as fresh. The default duration is 1h. .SH "CAPACITY AND EVICTION" @@ -85,7 +91,7 @@ Entries with 0 TTL will remain in the cache until randomly evicted when the shar .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metrics are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: .IP \(bu 4 \fB\fCcoredns_cache_size{server, type}\fR - Total elements in the cache by cache type. @@ -95,6 +101,8 @@ If monitoring is enabled (via the \fIprometheus\fP directive) then the following \fB\fCcoredns_cache_misses_total{server}\fR - Counter of cache misses. .IP \(bu 4 \fB\fCcoredns_cache_drops_total{server}\fR - Counter of dropped messages. +.IP \(bu 4 +\fB\fCcoredns_cache_served_stale_total{server}\fR - Counter of requests served from stale cache entries. .PP @@ -133,18 +141,18 @@ Proxy to Google Public DNS and only cache responses for example.org (or below). .RE .PP -Enable caching for all zones, keep a positive cache size of 5000 and a negative cache size of 2500: +Enable caching for \fB\fCexample.org\fR, keep a positive cache size of 5000 and a negative cache size of 2500: .PP .RS .nf - . { - cache { - success 5000 - denial 2500 +example.org { + cache { + success 5000 + denial 2500 } - } +} .fi .RE diff --git a/man/coredns-cancel.7 b/man/coredns-cancel.7 index 2c6f095c38c..3cb6f62924d 100644 --- a/man/coredns-cancel.7 +++ b/man/coredns-cancel.7 @@ -1,9 +1,9 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-CANCEL" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-CANCEL" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIcancel\fP - a plugin that cancels a request's context after 5001 milliseconds. +\fIcancel\fP - cancels a request's context after 5001 milliseconds. .SH "DESCRIPTION" .PP @@ -11,7 +11,7 @@ The \fIcancel\fP plugin creates a canceling context for each request. It adds a triggered after 5001 milliseconds.
.PP -The 5001 number is chosen because the default timeout for DNS clients is 5 seconds, after that they +The 5001 number was chosen because the default timeout for DNS clients is 5 seconds, after that they give up. .PP @@ -37,7 +37,7 @@ cancel [TIMEOUT] .RS .nf -\&. { +example.org { cancel whoami } @@ -52,7 +52,7 @@ Or with a custom timeout: .RS .nf -\&. { +example.org { cancel 1s whoami } diff --git a/man/coredns-chaos.7 b/man/coredns-chaos.7 index 31d2655a21a..07d7e5ee606 100644 --- a/man/coredns-chaos.7 +++ b/man/coredns-chaos.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-CHAOS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-CHAOS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -8,7 +8,7 @@ .SH "DESCRIPTION" .PP This is useful for retrieving version or author information from the server by querying a TXT record -for a special domainname in the CH class. +for a special domain name in the CH class. .SH "SYNTAX" .PP @@ -70,7 +70,7 @@ And test with \fB\fCdig\fR: % dig @localhost CH TXT version.bind \&... ;; ANSWER SECTION: -version.bind. 0 CH TXT "CoreDNS\-001" +version.bind. 0 CH TXT "CoreDNS\-001" \&... .fi diff --git a/man/coredns-clouddns.7 b/man/coredns-clouddns.7 new file mode 100644 index 00000000000..4783e028d15 --- /dev/null +++ b/man/coredns-clouddns.7 @@ -0,0 +1,100 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-CLOUDDNS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" + +.SH "NAME" +.PP +\fIclouddns\fP - enables serving zone data from GCP Cloud DNS. + +.SH "DESCRIPTION" +.PP +The \fIclouddns\fP plugin is useful for serving zones from resource record +sets in GCP Cloud DNS. This plugin supports all Google Cloud DNS +records +\[la]https://cloud.google.com/dns/docs/overview#supported_dns_record_types\[ra]. This plugin can +be used when CoreDNS is deployed on GCP or elsewhere. Note that this plugin accesses the resource +records through the Google Cloud API. For records in a privately hosted zone, it is not necessary to +place CoreDNS and this plugin in the associated VPC network. In fact the private hosted zone could +be created without any associated VPC and this plugin could still access the resource records under +the hosted zone. + +.SH "SYNTAX" +.PP +.RS + +.nf +clouddns [ZONE:PROJECT\_ID:HOSTED\_ZONE\_NAME...] { + credentials [FILENAME] + fallthrough [ZONES...] +} + +.fi +.RE + +.IP \(bu 4 +\fBZONE\fP the name of the domain to be accessed. When there are multiple zones with overlapping +domains (private vs. public hosted zone), CoreDNS does the lookup in the given order here. +Therefore, for a non-existing resource record, the SOA response will be from the rightmost zone. +.IP \(bu 4 +\fBPROJECT_ID\fP the project ID of the Google Cloud project. +.IP \(bu 4 +\fBHOSTED\_ZONE\_NAME\fP the name of the hosted zone that contains the resource record sets to be +accessed. +.IP \(bu 4 +\fB\fCcredentials\fR is used for reading the credential file. +.IP \(bu 4 +\fBFILENAME\fP GCP credentials file path (normally a .json file). +.IP \(bu 4 +\fB\fCfallthrough\fR If zone matches and no record can be generated, pass request to the next plugin. +If \fB[ZONES...]\fP is omitted, then fallthrough happens for all zones for which the plugin is +authoritative. If specific zones are listed (for example \fB\fCin-addr.arpa\fR and \fB\fCip6.arpa\fR), then +only queries for those zones will be subject to fallthrough.
+.IP \(bu 4 +\fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block +are used. + + +.SH "EXAMPLES" +.PP +Enable clouddns with implicit GCP credentials and resolve CNAMEs via 10.0.0.1: + +.PP +.RS + +.nf +example.org { + clouddns example.org.:gcp\-example\-project:example\-zone + forward . 10.0.0.1 +} + +.fi +.RE + +.PP +Enable clouddns with fallthrough: + +.PP +.RS + +.nf +example.org { + clouddns example.org.:gcp\-example\-project:example\-zone example.com.:gcp\-example\-project:example\-zone\-2 { + fallthrough example.gov. + } +} + +.fi +.RE + +.PP +Enable clouddns with multiple hosted zones with the same domain: + +.PP +.RS + +.nf +\&. { + clouddns example.org.:gcp\-example\-project:example\-zone example.org.:gcp\-example\-project:other\-example\-zone +} + +.fi +.RE + diff --git a/man/coredns-debug.7 b/man/coredns-debug.7 index 7880e9fe4ac..a00c376ab08 100644 --- a/man/coredns-debug.7 +++ b/man/coredns-debug.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-DEBUG" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-DEBUG" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-dnssec.7 b/man/coredns-dnssec.7 index 012afb6d12f..c9d944808cd 100644 --- a/man/coredns-dnssec.7 +++ b/man/coredns-dnssec.7 @@ -1,13 +1,13 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-DNSSEC" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-DNSSEC" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIdnssec\fP - enable on-the-fly DNSSEC signing of served data. +\fIdnssec\fP - enables on-the-fly DNSSEC signing of served data. .SH "DESCRIPTION" .PP -With \fIdnssec\fP any reply that doesn't (or can't) do DNSSEC will get signed on the fly. Authenticated +With \fIdnssec\fP, any reply that doesn't (or can't) do DNSSEC will get signed on the fly. Authenticated denial of existence is implemented with NSEC black lies. Using ECDSA as an algorithm is preferred as this leads to smaller signatures (compared to RSA). NSEC3 is \fInot\fP supported. @@ -69,7 +69,7 @@ RRSIGs. The default for \fBCAPACITY\fP is 10000. .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metrics are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: .IP \(bu 4 \fB\fCcoredns_dnssec_cache_size{server, type}\fR - total elements in the cache, type is "signature". diff --git a/man/coredns-dnstap.7 b/man/coredns-dnstap.7 index 1477199e9cf..e891614c5bd 100644 --- a/man/coredns-dnstap.7 +++ b/man/coredns-dnstap.7 @@ -1,13 +1,13 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-DNSTAP" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-DNSTAP" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIdnstap\fP - enable logging to dnstap. +\fIdnstap\fP - enables logging to dnstap. .SH "DESCRIPTION" .PP -dnstap is a flexible, structured binary log format for DNS software: http://dnstap.info +dnstap is a flexible, structured binary log format for DNS software; see http://dnstap.info \[la]http://dnstap.info\[ra]. With this plugin you can make CoreDNS output dnstap logging.
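+.PP +As a rough sketch (the socket path here is only an example), a Corefile that sends full dnstap logging for all queries to a local socket might look like: + +.PP +.RS + +.nf +\&. { + dnstap /tmp/dnstap.sock full + forward . 8.8.8.8 +} + +.fi +.RE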
diff --git a/man/coredns-erratic.7 b/man/coredns-erratic.7 index 06be9b3d7a5..774a478e62c 100644 --- a/man/coredns-erratic.7 +++ b/man/coredns-erratic.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ERRATIC" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ERRATIC" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -7,18 +7,14 @@ .SH "DESCRIPTION" .PP -\fIerratic\fP returns a static response to all queries, but the responses can be delayed, dropped or truncated. -The \fIerratic\fP plugin will respond to every A or AAAA query. For any other type it will return -a SERVFAIL response. The reply for A will return 192.0.2.53 (see RFC -5737 -\[la]https://tools.ietf.org/html/rfc5737\[ra], -for AAAA it returns 2001:DB8::53 (see RFC 3849 -\[la]https://tools.ietf.org/html/rfc3849\[ra]) and for an -AXFR request it will respond with a small zone transfer. - -.PP -\fIerratic\fP can also be used in conjunction with the \fIautopath\fP plugin. This is mostly to aid in -testing. +\fIerratic\fP returns a static response to all queries, but the responses can be delayed, +dropped or truncated. The \fIerratic\fP plugin will respond to every A or AAAA query. For +any other type it will return a SERVFAIL response (except AXFR). The reply for A will return +192.0.2.53 (RFC 5737 +\[la]https://tools.ietf.org/html/rfc5737\[ra]), for AAAA it returns 2001:DB8::53 (RFC +3849 +\[la]https://tools.ietf.org/html/rfc3849\[ra]). For an AXFR request it will respond with a small +zone transfer. .SH "SYNTAX" .PP @@ -55,7 +51,7 @@ This plugin reports readiness to the ready plugin. .RS .nf -\&. { +example.org { erratic { drop 3 } @@ -65,13 +61,13 @@ This plugin reports readiness to the ready plugin. .RE .PP -Or even shorter if the defaults suits you. Note this only drops queries, it does not delay them. +Or even shorter if the defaults suit you. Note this only drops queries, it does not delay them. .PP .RS .nf -\&. { +example.org { erratic } @@ -85,7 +81,7 @@ Delay 1 in 3 queries for 50ms .RS .nf -\&. { +example.org { erratic { delay 3 50ms } @@ -101,7 +97,7 @@ Delay 1 in 3 and truncate 1 in 5. .RS .nf -\&. { +example.org { erratic { delay 3 5ms truncate 5 @@ -118,7 +114,7 @@ Drop every second query. .RS .nf -\&. { +example.org { erratic { drop 2 truncate 2 @@ -131,7 +127,6 @@ Drop every second query. .SH "ALSO SEE" .PP RFC 3849 -\[la]https://tools.ietf.org/html/rfc3849\[ra] and -RFC 5737 +\[la]https://tools.ietf.org/html/rfc3849\[ra] and RFC 5737 \[la]https://tools.ietf.org/html/rfc5737\[ra]. diff --git a/man/coredns-errors.7 b/man/coredns-errors.7 index e148d5f1b20..57adeb019b7 100644 --- a/man/coredns-errors.7 +++ b/man/coredns-errors.7 @@ -1,9 +1,9 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ERRORS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ERRORS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIerrors\fP - enable error logging. +\fIerrors\fP - enables error logging. .SH "DESCRIPTION" .PP @@ -33,7 +33,7 @@ Extra knobs are available with an expanded syntax: .nf errors { - consolidate DURATION REGEXP + consolidate DURATION REGEXP } .fi @@ -59,13 +59,13 @@ For better performance, it's recommended to use the \fB\fC^\fR or \fB\fC$\fR met .SH "EXAMPLES" .PP -Use the \fIwhoami\fP to respond to queries and Log errors to standard output. 
+Use the \fIwhoami\fP plugin to respond to queries in the example.org domain and log errors to standard output. .PP .RS .nf -\&. { +example.org { whoami errors } diff --git a/man/coredns-etcd.7 b/man/coredns-etcd.7 index 11ffb385b33..f9784196ff3 100644 --- a/man/coredns-etcd.7 +++ b/man/coredns-etcd.7 @@ -1,20 +1,25 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ETCD" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ETCD" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIetcd\fP - enables reading zone data from an etcd version 3 instance. +\fIetcd\fP - enables SkyDNS service discovery from etcd. .SH "DESCRIPTION" .PP -The data in etcd instance has to be encoded as +The \fIetcd\fP plugin implements the (older) SkyDNS service discovery service. It is \fInot\fP suitable as +a generic DNS zone data plugin. Only a subset of DNS record types are implemented, and subdomains +and delegations are not handled at all. + +.PP +The data in the etcd instance has to be encoded as a message \[la]https://github.com/skynetservices/skydns/blob/2fcff74cdc9f9a7dd64189a447ef27ac354b725f/msg/service.go#L26\[ra] like SkyDNS -\[la]https://github.com/skynetservices/skydns\[ra]. It should also work just like SkyDNS. +\[la]https://github.com/skynetservices/skydns\[ra]. It works just like SkyDNS. .PP -The etcd plugin makes extensive use of the forward plugin to forward and query other servers in the +The etcd plugin makes extensive use of the \fIforward\fP plugin to forward and query other servers in the network. .SH "SYNTAX" @@ -28,7 +33,7 @@ etcd [ZONES...] .RE .IP \(bu 4 -\fBZONES\fP zones etcd should be authoritative for. +\fBZONES\fP zones \fIetcd\fP should be authoritative for. .PP @@ -86,23 +91,23 @@ is needed. .SH "SPECIAL BEHAVIOUR" .PP -CoreDNS etcd plugin leverages directory structure to look for related entries. For example an entry \fB\fC/skydns/test/skydns/mx\fR would have entries like \fB\fC/skydns/test/skydns/mx/a\fR, \fB\fC/skydns/test/skydns/mx/b\fR and so on. Similarly a directory \fB\fC/skydns/test/skydns/mx1\fR will have all \fB\fCmx1\fR entries. +The \fIetcd\fP plugin leverages directory structure to look for related entries. For example +an entry \fB\fC/skydns/test/skydns/mx\fR would have entries like \fB\fC/skydns/test/skydns/mx/a\fR, +\fB\fC/skydns/test/skydns/mx/b\fR and so on. Similarly a directory \fB\fC/skydns/test/skydns/mx1\fR will have all +\fB\fCmx1\fR entries. .PP -With etcd3, support for hierarchical keys are dropped -\[la]https://coreos.com/etcd/docs/latest/learning/api.html\[ra]. This means there are no directories but only flat keys with prefixes in etcd3. To accommodate lookups, etcdv3 plugin now does a lookup on prefix \fB\fC/skydns/test/skydns/mx/\fR to search for entries like \fB\fC/skydns/test/skydns/mx/a\fR etc, and if there is nothing found on \fB\fC/skydns/test/skydns/mx/\fR, it looks for \fB\fC/skydns/test/skydns/mx\fR to find entries like \fB\fC/skydns/test/skydns/mx1\fR. +With etcd3, support for hierarchical keys is +dropped +\[la]https://coreos.com/etcd/docs/latest/learning/api.html\[ra]. This means there are no directories +but only flat keys with prefixes in etcd3.
To accommodate lookups, etcdv3 plugin now does a lookup +on prefix \fB\fC/skydns/test/skydns/mx/\fR to search for entries like \fB\fC/skydns/test/skydns/mx/a\fR etc, and +if there is nothing found on \fB\fC/skydns/test/skydns/mx/\fR, it looks for \fB\fC/skydns/test/skydns/mx\fR to +find entries like \fB\fC/skydns/test/skydns/mx1\fR. .PP This causes two lookups from CoreDNS to etcdv3 in certain cases. -.SH "MIGRATION TO "\fB\fCetcdv3\fR" API" -.PP -With CoreDNS release \fB\fC1.2.0\fR, you'll need to migrate existing CoreDNS related data (if any) on your etcd server to etcdv3 API. This is because with \fB\fCetcdv3\fR support, CoreDNS can't see the data stored to an etcd server using \fB\fCetcdv2\fR API. - -.PP -Refer this blog by CoreOS team -\[la]https://coreos.com/blog/migrating-applications-etcd-v3.html\[ra] to migrate to etcdv3 API. - .SH "EXAMPLES" .PP This is the default SkyDNS setup, with everything specified in full: @@ -111,15 +116,19 @@ This is the default SkyDNS setup, with everything specified in full: .RS .nf -\&. { - etcd skydns.local { +skydns.local { + etcd { path /skydns endpoint http://localhost:2379 } prometheus - cache 160 skydns.local + cache loadbalance +} + +\&. { forward . 8.8.8.8:53 8.8.4.4:53 + cache } .fi @@ -133,12 +142,16 @@ when resolving external pointing CNAMEs. .RS .nf -\&. { - etcd skydns.local { +skydns.local { + etcd { path /skydns } - cache 160 skydns.local + cache +} + +\&. { forward . /etc/resolv.conf + cache } .fi @@ -159,12 +172,19 @@ etcd skydns.local { .RE .PP -Before getting started with these examples, please setup \fB\fCetcdctl\fR (with \fB\fCetcdv3\fR API) as explained here -\[la]https://coreos.com/etcd/docs/latest/dev-guide/interacting_v3.html\[ra]. This will help you to put sample keys in your etcd server. +Before getting started with these examples, please setup \fB\fCetcdctl\fR (with \fB\fCetcdv3\fR API) as explained +here +\[la]https://coreos.com/etcd/docs/latest/dev-guide/interacting_v3.html\[ra]. This will help you to put +sample keys in your etcd server. .PP -If you prefer, you can use \fB\fCcurl\fR to populate the \fB\fCetcd\fR server, but with \fB\fCcurl\fR the endpoint URL depends on the version of \fB\fCetcd\fR. For instance, \fB\fCetcd v3.2\fR or before uses only [CLIENT-URL]/v3alpha/* while \fB\fCetcd v3.5\fR or later uses [CLIENT-URL]/v3/* . Also, Key and Value must be base64 encoded in the JSON payload. With \fB\fCetcdctl\fR these details are automatically taken care off. You can check this document -\[la]https://github.com/coreos/etcd/blob/master/Documentation/dev-guide/api_grpc_gateway.md#notes\[ra] for details. +If you prefer, you can use \fB\fCcurl\fR to populate the \fB\fCetcd\fR server, but with \fB\fCcurl\fR the +endpoint URL depends on the version of \fB\fCetcd\fR. For instance, \fB\fCetcd v3.2\fR or before uses only +[CLIENT-URL]/v3alpha/* while \fB\fCetcd v3.5\fR or later uses [CLIENT-URL]/v3/* . Also, Key and Value must +be base64 encoded in the JSON payload. With \fB\fCetcdctl\fR these details are automatically taken care +of. You can check this document +\[la]https://github.com/coreos/etcd/blob/master/Documentation/dev-guide/api_grpc_gateway.md#notes\[ra] +for details. .SS "REVERSE ZONES" .PP @@ -210,7 +230,9 @@ reverse.skydns.local. .SS "ZONE NAME AS A RECORD" .PP -The zone name itself can be used as A record. This behavior can be achieved by writing special entries to the ETCD path of your zone. 
If your zone is named \fB\fCskydns.local\fR for example, you can create an \fB\fCA\fR record for this zone as follows: +The zone name itself can be used as an \fB\fCA\fR record. This behavior can be achieved by writing special +entries to the ETCD path of your zone. If your zone is named \fB\fCskydns.local\fR for example, you can +create an \fB\fCA\fR record for this zone as follows: .PP .RS diff --git a/man/coredns-federation.7 b/man/coredns-federation.7 deleted file mode 100644 index a7c40872044..00000000000 --- a/man/coredns-federation.7 +++ /dev/null @@ -1,54 +0,0 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-FEDERATION" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" - -.SH "NAME" -.PP -\fIfederation\fP - enables federated queries to be resolved via the kubernetes plugin. - -.SH "DESCRIPTION" -.PP -Enabling this plugin allows -Federated -\[la]https://kubernetes.io/docs/tasks/federation/federation-service-discovery/\[ra] queries to be -resolved via the kubernetes plugin. - -.PP -Enabling \fIfederation\fP without also having \fIkubernetes\fP is a noop. - -.SH "SYNTAX" -.PP -.RS - -.nf -federation [ZONES...] { - NAME DOMAIN -} - -.fi -.RE - -.IP \(bu 4 -Each \fBNAME\fP and \fBDOMAIN\fP defines federation membership. One entry for each. A duplicate -\fBNAME\fP will silently overwrite any previous value. - - -.SH "EXAMPLES" -.PP -Here we handle all service requests in the \fB\fCprod\fR and \fB\fCstage\fR federations. - -.PP -.RS - -.nf -\&. { - kubernetes cluster.local - federation cluster.local { - prod prod.feddomain.com - staging staging.feddomain.com - } - forward . 192.168.1.12 -} - -.fi -.RE - diff --git a/man/coredns-file.7 b/man/coredns-file.7 index 8c39d79c051..bdc4e1b049b 100644 --- a/man/coredns-file.7 +++ b/man/coredns-file.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-FILE" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-FILE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -7,7 +7,7 @@ .SH "DESCRIPTION" .PP -The file plugin is used for an "old-style" DNS server. It serves from a preloaded file that exists +The \fIfile\fP plugin is used for an "old-style" DNS server. It serves from a preloaded file that exists on disk. If the zone file contains signatures (i.e., is signed using DNSSEC), correct DNSSEC answers are returned. Only NSEC is supported! If you use this setup \fIyou\fP are responsible for re-signing the zonefile. @@ -24,7 +24,7 @@ file DBFILE [ZONES...] .IP \(bu 4 \fBDBFILE\fP the database file to read and parse. If the path is relative, the path from the \fIroot\fP -directive will be prepended to it. +plugin will be prepended to it. .IP \(bu 4 \fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block are used. @@ -138,3 +138,7 @@ example.org { .fi .RE +.SH "ALSO SEE" +.PP +See the \fIloadbalance\fP plugin if you need simple record shuffling. 
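+.PP +For instance, a small sketch (assuming a zone file \fB\fCdb.example.org\fR exists on disk) that combines the two plugins to shuffle the order of returned records: + +.PP +.RS + +.nf +example.org { + file db.example.org + loadbalance +} + +.fi +.RE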
+ diff --git a/man/coredns-forward.7 b/man/coredns-forward.7 index baf4e87b700..40cbd9b856f 100644 --- a/man/coredns-forward.7 +++ b/man/coredns-forward.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-FORWARD" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-FORWARD" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -142,27 +142,24 @@ readTimeout by default is 2 sec, and can decrease automatically down to 200ms .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: .IP \(bu 4 \fB\fCcoredns_forward_request_duration_seconds{to}\fR - duration per upstream interaction. .IP \(bu 4 \fB\fCcoredns_forward_request_count_total{to}\fR - query count per upstream. .IP \(bu 4 -\fB\fCcoredns_forward_response_rcode_total{to, rcode}\fR - count of RCODEs per upstream. +\fB\fCcoredns_forward_response_rcode_count_total{to, rcode}\fR - count of RCODEs per upstream. .IP \(bu 4 \fB\fCcoredns_forward_healthcheck_failure_count_total{to}\fR - number of failed health checks per upstream. .IP \(bu 4 \fB\fCcoredns_forward_healthcheck_broken_count_total{}\fR - counter of when all upstreams are unhealthy, and we are randomly (this always uses the \fB\fCrandom\fR policy) spraying to an upstream. -.IP \(bu 4 -\fB\fCcoredns_forward_socket_count_total{to}\fR - number of cached sockets per upstream. .PP -Where \fB\fCto\fR is one of the upstream servers (\fBTO\fP from the config), \fB\fCproto\fR is the protocol used by -the incoming query ("tcp" or "udp"), and family the transport family ("1" for IPv4, and "2" for -IPv6). +Where \fB\fCto\fR is one of the upstream servers (\fBTO\fP from the config), and \fB\fCrcode\fR is the returned RCODE +from the upstream. .SH "EXAMPLES" .PP diff --git a/man/coredns-grpc.7 b/man/coredns-grpc.7 index 29d9a4a9eb7..c0b68bba0cc 100644 --- a/man/coredns-grpc.7 +++ b/man/coredns-grpc.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-GRPC" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-GRPC" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -90,14 +90,14 @@ Also note the TLS config is "global" for the whole grpc proxy if you need a diff .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: .IP \(bu 4 \fB\fCcoredns_grpc_request_duration_seconds{to}\fR - duration per upstream interaction. .IP \(bu 4 \fB\fCcoredns_grpc_request_count_total{to}\fR - query count per upstream. .IP \(bu 4 -\fB\fCcoredns_grpc_response_rcode_total{to, rcode}\fR - count of RCODEs per upstream. +\fB\fCcoredns_grpc_response_rcode_count_total{to, rcode}\fR - count of RCODEs per upstream. and we are randomly (this always uses the \fB\fCrandom\fR policy) spraying to an upstream.
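+.PP +To make the \fB\fCto\fR label concrete, here is a hedged sketch with two placeholder upstream addresses; each upstream would show up as a distinct \fB\fCto\fR value: + +.PP +.RS + +.nf +\&. { + grpc . 10.0.0.10:443 10.0.0.11:443 + prometheus +} + +.fi +.RE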
diff --git a/man/coredns-health.7 b/man/coredns-health.7 index 8a114089c97..c8567a2e1bd 100644 --- a/man/coredns-health.7 +++ b/man/coredns-health.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-HEALTH" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-HEALTH" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -65,11 +65,11 @@ net { .RE .PP -Doing this is supported but both endponts ":8080" and ":8081" will export the exact same health. +Doing this is supported but both endpoints ":8080" and ":8081" will export the exact same health. .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric is exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metric is exported: .IP \(bu 4 \fB\fCcoredns_health_request_duration_seconds{}\fR - duration to process a HTTP query to the local diff --git a/man/coredns-hosts.7 b/man/coredns-hosts.7 index 93ad19d3bb9..2f7b6168fe0 100644 --- a/man/coredns-hosts.7 +++ b/man/coredns-hosts.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-HOSTS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-HOSTS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -13,15 +13,21 @@ plugin only supports A, AAAA, and PTR records. The hosts plugin can be used with available hosts files that block access to advertising servers. .PP -The plugin reloads the content of the hosts file every 5 seconds. Upon reload, CoreDNS will use the new definitions. -Should the file be deleted, any inlined content will continue to be served. When the file is restored, it will then again be used. +The plugin reloads the content of the hosts file every 5 seconds. Upon reload, CoreDNS will use the +new definitions. Should the file be deleted, any inlined content will continue to be served. When +the file is restored, it will then again be used. + +.PP +If you want to pass the request to the rest of the plugin chain if there is no match in the \fIhosts\fP +plugin, you must specify the \fB\fCfallthrough\fR option. .PP This plugin can only be used once per Server Block. .SH "THE HOSTS FILE" .PP -Commonly the entries are of the form \fB\fCIP_address canonical_hostname [aliases...]\fR as explained by the hosts(5) man page. +Commonly the entries are of the form \fB\fCIP_address canonical_hostname [aliases...]\fR as explained by +the hosts(5) man page. .PP Examples: @@ -41,7 +47,8 @@ fdfc:a744:27b5:3b0e::1 example.com example .SS "PTR RECORDS" .PP -PTR records for reverse lookups are generated automatically by CoreDNS (based on the hosts file entries) and cannot be created manually. +PTR records for reverse lookups are generated automatically by CoreDNS (based on the hosts file +entries) and cannot be created manually. .SH "SYNTAX" .PP @@ -61,7 +68,7 @@ hosts [FILE [ZONES...]] { .IP \(bu 4 \fBFILE\fP the hosts file to read and parse. If the path is relative the path from the \fIroot\fP -directive will be prepended to it. Defaults to /etc/hosts if omitted. We scan the file for changes +plugin will be prepended to it. Defaults to /etc/hosts if omitted. We scan the file for changes every 5 seconds. .IP \(bu 4 \fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block @@ -73,7 +80,10 @@ file path will still be read but entries will be overridden. 
.IP \(bu 4 \fB\fCttl\fR change the DNS TTL of the records generated (forward and reverse). The default is 3600 seconds (1 hour). .IP \(bu 4 -\fB\fCreload\fR change the period between each hostsfile reload. A time of zero seconds disable the feature. Examples of valid durations: "300ms", "1.5h" or "2h45m" are valid duration with units "ns" (nanosecond), "us" (or "µs" for microsecond), "ms" (millisecond), "s" (second), "m" (minute), "h" (hour). +\fB\fCreload\fR change the period between each hostsfile reload. A time of zero seconds disables the +feature. Examples of valid durations: "300ms", "1.5h" or "2h45m". See Go's +time +\[la]https://godoc.org/time\[ra]. package. .IP \(bu 4 \fB\fCno_reverse\fR disable the automatic generation of the \fB\fCin-addr.arpa\fR or \fB\fCip6.arpa\fR entries for the hosts .IP \(bu 4 @@ -83,6 +93,16 @@ is authoritative. If specific zones are listed (for example \fB\fCin-addr.arpa\f queries for those zones will be subject to fallthrough. +.SH "METRICS" +.PP +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: + +.IP \(bu 4 +\fB\fCcoredns_hosts_entries_count{}\fR - The combined number of entries in hosts and Corefile. +.IP \(bu 4 +\fB\fCcoredns_hosts_reload_timestamp_seconds{}\fR - The timestamp of the last reload of hosts file. + + .SH "EXAMPLES" .PP Load \fB\fC/etc/hosts\fR file. @@ -136,11 +156,12 @@ Load hosts file inlined in Corefile. .RS .nf -\&. { - hosts example.hosts example.org { +example.hosts example.org { + hosts { 10.0.0.1 example.org fallthrough } + whoami } .fi diff --git a/man/coredns-import.7 b/man/coredns-import.7 index cc4ab58d466..215a86b211e 100644 --- a/man/coredns-import.7 +++ b/man/coredns-import.7 @@ -1,17 +1,17 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-IMPORT" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-IMPORT" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIimport\fP - include files or reference snippets from a Corefile. +\fIimport\fP - includes files or references snippets from a Corefile. .SH "DESCRIPTION" .PP -The \fIimport\fP plugin can be used to include files into the main configuration. Another use it to +The \fIimport\fP plugin can be used to include files into the main configuration. Another use is to reference predefined snippets. Both can help to avoid some duplication. .PP -This is a unique directive in that \fIimport\fP can appear outside of a server block. In other words, it +This is a unique plugin in that \fIimport\fP can appear outside of a server block. In other words, it can appear at the top of a Corefile where an address would normally be. .SH "SYNTAX" @@ -45,7 +45,7 @@ label surrounded by parentheses: .nf (mysnippet) { - ... + ... } .fi diff --git a/man/coredns-k8s_external.7 b/man/coredns-k8s_external.7 index 8c7a74e9157..5b43fe125d9 100644 --- a/man/coredns-k8s_external.7 +++ b/man/coredns-k8s_external.7 @@ -1,9 +1,9 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-K8S_EXTERNAL" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-K8S_EXTERNAL" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIk8s_external\fP - resolve load balancer and external IPs from outside kubernetes clusters. +\fIk8s_external\fP - resolves load balancer and external IPs from outside Kubernetes clusters. .SH "DESCRIPTION" .PP @@ -12,24 +12,24 @@ service. 
This plugin is only useful if the \fIkubernetes\fP plugin is also loade .PP The plugin uses an external zone to resolve in-cluster IP addresses. It only handles queries for A, -AAAA and SRV records, all others result in NODATA responses. To make it a proper DNS zone it handles +AAAA and SRV records; all others result in NODATA responses. To make it a proper DNS zone, it handles SOA and NS queries for the apex of the zone. .PP -By default the apex of the zone will look like (assuming the zone used is \fB\fCexample.org\fR): +By default the apex of the zone will look like the following (assuming the zone used is \fB\fCexample.org\fR): .PP .RS .nf -example.org. 5 IN SOA ns1.dns.example.org. hostmaster.example.org. ( - 12345 ; serial - 14400 ; refresh (4 hours) - 3600 ; retry (1 hour) - 604800 ; expire (1 week) - 5 ; minimum (4 hours) - ) -example.org 5 IN NS ns1.dns.example.org. +example.org. 5 IN SOA ns1.dns.example.org. hostmaster.example.org. ( + 12345 ; serial + 14400 ; refresh (4 hours) + 3600 ; retry (1 hour) + 604800 ; expire (1 week) + 5 ; minimum (4 hours) + ) +example.org 5 IN NS ns1.dns.example.org. ns1.dns.example.org. 5 IN A .... ns1.dns.example.org. 5 IN AAAA .... @@ -38,12 +38,12 @@ ns1.dns.example.org. 5 IN AAAA .... .RE .PP -Note we use the \fB\fCdns\fR subdomain to place the records the DNS needs (see the \fB\fCapex\fR directive). Also +Note that we use the \fB\fCdns\fR subdomain for the records DNS needs (see the \fB\fCapex\fR directive). Also note the SOA's serial number is static. The IP addresses of the nameserver records are those of the CoreDNS service. .PP -The \fIk8s_external\fP plugin handles the subdomain \fB\fCdns\fR and the apex of the zone by itself, all other +The \fIk8s_external\fP plugin handles the subdomain \fB\fCdns\fR and the apex of the zone itself; all other queries are resolved to addresses in the cluster. .SH "SYNTAX" @@ -61,7 +61,7 @@ k8s\_external [ZONE...] .PP -If you want to change the apex domain or use a different TTL for the return records you can use +If you want to change the apex domain or use a different TTL for the returned records you can use this extended syntax. .PP @@ -77,13 +77,13 @@ k8s\_external [ZONE...] { .RE .IP \(bu 4 -\fBAPEX\fP is the name (DNS label) to use the apex records, defaults to \fB\fCdns\fR. +\fBAPEX\fP is the name (DNS label) to use for the apex records; it defaults to \fB\fCdns\fR. .IP \(bu 4 \fB\fCttl\fR allows you to set a custom \fBTTL\fP for responses. The default is 5 (seconds). .PP -Enable names under \fB\fCexample.org\fR to be resolved to in cluster DNS addresses. +Enable names under \fB\fCexample.org\fR to be resolved to in-cluster DNS addresses. .PP .RS @@ -98,7 +98,7 @@ Enable names under \fB\fCexample.org\fR to be resolved to in cluster DNS address .RE .PP -With the Corefile above, the following Service will get an \fB\fCA\fR record for \fB\fCtest.default.example.org\fR with IP address \fB\fC192.168.200.123\fR. +With the Corefile above, the following Service will get an \fB\fCA\fR record for \fB\fCtest.default.example.org\fR with the IP address \fB\fC192.168.200.123\fR. 
.PP .RS diff --git a/man/coredns-kubernetes.7 b/man/coredns-kubernetes.7 index 6d7cfc555b8..21596ed94d7 100644 --- a/man/coredns-kubernetes.7 +++ b/man/coredns-kubernetes.7 @@ -1,9 +1,9 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-KUBERNETES" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-KUBERNETES" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIkubernetes\fP - enables the reading zone data from a Kubernetes cluster. +\fIkubernetes\fP - enables reading zone data from a Kubernetes cluster. .SH "DESCRIPTION" .PP @@ -37,7 +37,7 @@ kubernetes [ZONES...] .RE .PP -With only the directive specified, the \fIkubernetes\fP plugin will default to the zone specified in +With only the plugin specified, the \fIkubernetes\fP plugin will default to the zone specified in the server's block. It will handle all queries in that zone and connect to Kubernetes in-cluster. It will not provide PTR records for services or A records for pods. If \fBZONES\fP is used it specifies all the zones the plugin should be authoritative for. @@ -47,7 +47,6 @@ all the zones the plugin should be authoritative for. .nf kubernetes [ZONES...] { - resyncperiod DURATION endpoint URL tls CERT KEY CACERT kubeconfig KUBECONFIG CONTEXT @@ -65,9 +64,6 @@ kubernetes [ZONES...] { .fi .RE -.IP \(bu 4 -\fB\fCresyncperiod\fR specifies the Kubernetes data API \fBDURATION\fP period. By -default resync is disabled (DURATION is zero). .IP \(bu 4 \fB\fCendpoint\fR specifies the \fBURL\fP for a remote k8s API endpoint. If omitted, it will connect to k8s in-cluster using the cluster service account. @@ -306,15 +302,12 @@ For example, wildcards can be used to resolve all Endpoints for a Service as \fB .RS .nf -*.service.default.svc.cluster.local. 5 IN A 192.168.10.10 -*.service.default.svc.cluster.local. 5 IN A 192.168.25.15 +*.service.default.svc.cluster.local. 5 IN A 192.168.10.10 +*.service.default.svc.cluster.local. 5 IN A 192.168.25.15 .fi .RE -.PP -This response can be randomized using the \fB\fCloadbalance\fR plugin - .SH "METADATA" .PP The kubernetes plugin will publish the following metadata, if the \fImetadata\fP @@ -338,3 +331,31 @@ kubernetes/client-namespace: the client pod's namespace, if \fB\fCpods verified\ kubernetes/client-pod-name: the client pod's name, if \fB\fCpods verified\fR mode is enabled +.SH "METRICS" +.PP +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: + +.IP \(bu 4 +\fB\fCcoredns_kubernetes_dns_programming_duration_seconds{service_kind}\fR - Exports the +DNS programming latency SLI +\[la]https://github.com/kubernetes/community/blob/master/sig-scalability/slos/dns_programming_latency.md\[ra]. +The metrics has the \fB\fCservice_kind\fR label that identifies the kind of the +kubernetes service +\[la]https://kubernetes.io/docs/concepts/services-networking/service\[ra]. +It may take one of the three values: + +.RS +.IP \(en 4 +\fB\fCcluster_ip\fR +.IP \(en 4 +\fB\fCheadless_with_selector\fR +.IP \(en 4 +\fB\fCheadless_without_selector\fR + +.RE + + +.SH "BUGS" +.PP +The duration metric only supports the "headless\fIwith\fPselector" service currently. 
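+.PP
+To make the metrics section above concrete, a minimal Corefile sketch: the export only happens
+when \fIprometheus\fP is loaded in the same server block. The \fB\fCcluster.local\fR zone and the
+default metrics address used here are assumptions, not requirements:
+
+.PP
+.RS
+
+.nf
+\&. {
+    kubernetes cluster.local
+    prometheus localhost:9153
+}
+
+.fi
+.RE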
+ diff --git a/man/coredns-loadbalance.7 b/man/coredns-loadbalance.7 index f10d44d491f..0b34f522a4d 100644 --- a/man/coredns-loadbalance.7 +++ b/man/coredns-loadbalance.7 @@ -1,18 +1,18 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-LOADBALANCE" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-LOADBALANCE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIloadbalance\fP - randomize the order of A, AAAA and MX records. +\fIloadbalance\fP - randomizes the order of A, AAAA and MX records. .SH "DESCRIPTION" .PP -The \fIloadbalance\fP will act as a round-robin DNS loadbalancer by randomizing the order of A, AAAA, +The \fIloadbalance\fP will act as a round-robin DNS load balancer by randomizing the order of A, AAAA, and MX records in the answer. .PP See Wikipedia -\[la]https://en.wikipedia.org/wiki/Round-robin_DNS\[ra] about the pros and cons on this +\[la]https://en.wikipedia.org/wiki/Round-robin_DNS\[ra] about the pros and cons of this setup. It will take care to sort any CNAMEs before any address records, because some stub resolver implementations (like glibc) are particular about that. @@ -27,7 +27,7 @@ loadbalance [POLICY] .RE .IP \(bu 4 -\fBPOLICY\fP is how to balance, the default, and only option, is "round_robin". +\fBPOLICY\fP is how to balance. The default, and only option, is "round_robin". .SH "EXAMPLES" diff --git a/man/coredns-log.7 b/man/coredns-log.7 index 4c5c8be6865..d1e7e653821 100644 --- a/man/coredns-log.7 +++ b/man/coredns-log.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-LOG" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-LOG" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -155,7 +155,7 @@ Each of these logs will be outputted with \fB\fClog.Infof\fR, so a typical examp .RS .nf -2018\-10\-30T19:10:07.547Z [INFO] [::1]:50759 \- 29008 "A IN example.org. udp 41 false 4096" NOERROR qr,rd,ra,ad 68 0.037990251s +[INFO] [::1]:50759 \- 29008 "A IN example.org. udp 41 false 4096" NOERROR qr,rd,ra,ad 68 0.037990251s ~~~~ ## Examples diff --git a/man/coredns-loop.7 b/man/coredns-loop.7 index 7d11b0fb72f..b1baa91ea56 100644 --- a/man/coredns-loop.7 +++ b/man/coredns-loop.7 @@ -1,9 +1,9 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-LOOP" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-LOOP" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIloop\fP - detect simple forwarding loops and halt the server. +\fIloop\fP - detects simple forwarding loops and halts the server. .SH "DESCRIPTION" .PP diff --git a/man/coredns-metadata.7 b/man/coredns-metadata.7 index 8341e146a38..7c6de649f25 100644 --- a/man/coredns-metadata.7 +++ b/man/coredns-metadata.7 @@ -1,31 +1,31 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-METADATA" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-METADATA" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fImetadata\fP - enable a meta data collector. +\fImetadata\fP - enables a metadata collector. 
.SH "DESCRIPTION" .PP By enabling \fImetadata\fP any plugin that implements metadata.Provider interface \[la]https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider\[ra] will be called for -each DNS query, at beginning of the process for that query, in order to add it's own meta data to +each DNS query, at the beginning of the process for that query, in order to add its own metadata to context. .PP -The meta data collected will be available for all plugins, via the Context parameter provided in the +The metadata collected will be available for all plugins, via the Context parameter provided in the ServeDNS function. The package (code) documentation has examples on how to inspect and retrieve metadata a plugin might be interested in. .PP -The meta data is added by setting a label with a value in the context. These labels should be named +The metadata is added by setting a label with a value in the context. These labels should be named \fB\fCplugin/NAME\fR, where \fBNAME\fP is something descriptive. The only hard requirement the \fImetadata\fP -plugin enforces is that the labels contains a slash. See the documentation for +plugin enforces is that the labels contain a slash. See the documentation for \fB\fCmetadata.SetValueFunc\fR. .PP -The value stored is a string. The empty string signals "no meta data". See the documentation for +The value stored is a string. The empty string signals "no metadata". See the documentation for \fB\fCmetadata.ValueFunc\fR on how to retrieve this. .SH "SYNTAX" diff --git a/man/coredns-metrics.7 b/man/coredns-metrics.7 index 057c60f99d7..ca895cacbf0 100644 --- a/man/coredns-metrics.7 +++ b/man/coredns-metrics.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-METRICS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-METRICS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-nsid.7 b/man/coredns-nsid.7 index d22b18af2e9..359b79384cf 100644 --- a/man/coredns-nsid.7 +++ b/man/coredns-nsid.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-NSID" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-NSID" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -39,7 +39,7 @@ Enable nsid: .RS .nf -\&. { +example.org { whoami nsid Use The Force } @@ -66,7 +66,7 @@ And now a client with NSID support will see an OPT record with the NSID option: ; EDNS: version: 0, flags:; udp: 4096 ; NSID: 55 73 65 20 54 68 65 20 46 6f 72 63 65 ("Use The Force") ;; QUESTION SECTION: -;whoami.example.org. IN A +;whoami.example.org. 
IN A .fi .RE diff --git a/man/coredns-pprof.7 b/man/coredns-pprof.7 index f5ef12858f8..092336943d4 100644 --- a/man/coredns-pprof.7 +++ b/man/coredns-pprof.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-PPROF" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-PPROF" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-ready.7 b/man/coredns-ready.7 index e0d36358f6d..fdcaad6c62c 100644 --- a/man/coredns-ready.7 +++ b/man/coredns-ready.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-READY" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-READY" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -15,7 +15,7 @@ will not be queried again. .PP Each Server Block that enables the \fIready\fP plugin will have the plugins \fIin that server block\fP report readiness into the /ready endpoint that runs on the same port. This also means that the -\fIsame\fP plugin with different configurations (in potentialy \fIdifferent\fP Server Blocks) will have +\fIsame\fP plugin with different configurations (in potentially \fIdifferent\fP Server Blocks) will have their readiness reported as the union of their respective readinesses. .SH "SYNTAX" diff --git a/man/coredns-reload.7 b/man/coredns-reload.7 index 1f2efbab02f..8265abc6595 100644 --- a/man/coredns-reload.7 +++ b/man/coredns-reload.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-RELOAD" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-RELOAD" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -48,17 +48,15 @@ reload [INTERVAL] [JITTER] .fi .RE +.PP +The plugin will check for changes every \fBINTERVAL\fP, subject to +/- the \fBJITTER\fP duration. + .IP \(bu 4 -The plugin will check for changes every \fBINTERVAL\fP, subject to +/- the \fBJITTER\fP duration -.IP \(bu 4 -\fBINTERVAL\fP and \fBJITTER\fP are Golang (durations)[https://golang.org/pkg/time/#ParseDuration -\[la]https://golang.org/pkg/time/#ParseDuration\[ra]] -.IP \(bu 4 -Default \fBINTERVAL\fP is 30s, default \fBJITTER\fP is 15s -.IP \(bu 4 -Minimal value for \fBINTERVAL\fP is 2s, and for \fBJITTER\fP is 1s -.IP \(bu 4 -If \fBJITTER\fP is more than half of \fBINTERVAL\fP, it will be set to half of \fBINTERVAL\fP +\fBINTERVAL\fP and \fBJITTER\fP are Golang durations +\[la]https://golang.org/pkg/time/#ParseDuration\[ra]. +The default \fBINTERVAL\fP is 30s, default \fBJITTER\fP is 15s, the minimal value for \fBINTERVAL\fP +is 2s, and for \fBJITTER\fP it is 1s. If \fBJITTER\fP is more than half of \fBINTERVAL\fP, it will be +set to half of \fBINTERVAL\fP .SH "EXAMPLES" @@ -102,8 +100,8 @@ where the reload fails, and you loose functionality. Consider the following Core .nf \&. { - health :8080 - whoami + health :8080 + whoami } .fi @@ -125,18 +123,19 @@ fail loading the new Corefile, abort and keep using the old process .PP After the aborted attempt to reload we are left with the old processes running, but the listener is -closed in step 1; so the health endpoint is broken. The same can hopen in the prometheus metrics plugin. +closed in step 1; so the health endpoint is broken. The same can happen in the prometheus metrics plugin. .PP In general be careful with assigning new port and expecting reload to work fully. 
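+.PP
+A minimal sketch of the syntax described above; the 10s interval and 5s jitter are illustrative
+values that satisfy the documented minimums, and no ports change across reloads, so the pitfall
+above is avoided:
+
+.PP
+.RS
+
+.nf
+\&. {
+    reload 10s 5s
+    whoami
+}
+
+.fi
+.RE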
.PP -Also any \fB\fCimport\fR statement is not discovered by this plugin. This means if any of these imported files -changes the \fIreload\fP plugin is ignorant of that fact. +In CoreDNS v1.6.0 and earlier any \fB\fCimport\fR statements are not discovered by this plugin. +This means if any of these imported files changes the \fIreload\fP plugin is ignorant of that fact. +CoreDNS v1.7.0 and later does parse the Corefile and supports detecting changes in imported files. .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric is exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metric is exported: .IP \(bu 4 \fB\fCcoredns_reload_failed_count_total{}\fR - counts the number of failed reload attempts. diff --git a/man/coredns-rewrite.7 b/man/coredns-rewrite.7 index d3adccce45e..b7afff2be94 100644 --- a/man/coredns-rewrite.7 +++ b/man/coredns-rewrite.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-REWRITE" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-REWRITE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-root.7 b/man/coredns-root.7 index 9c4ee7707ab..aee1865b5a4 100644 --- a/man/coredns-root.7 +++ b/man/coredns-root.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ROOT" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ROOT" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-route53.7 b/man/coredns-route53.7 index 68e10a4ff08..276b6a7a00a 100644 --- a/man/coredns-route53.7 +++ b/man/coredns-route53.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ROUTE53" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ROUTE53" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -19,9 +19,10 @@ The route53 plugin can be used when coredns is deployed on AWS or elsewhere. .nf route53 [ZONE:HOSTED\_ZONE\_ID...] { - [aws\_access\_key AWS\_ACCESS\_KEY\_ID AWS\_SECRET\_ACCESS\_KEY] + aws\_access\_key [AWS\_ACCESS\_KEY\_ID AWS\_SECRET\_ACCESS\_KEY] credentials PROFILE [FILENAME] fallthrough [ZONES...] + refresh DURATION } .fi @@ -48,23 +49,35 @@ zone. \fBFILENAME\fP AWS credentials filename. Defaults to \fB\fC~/.aws/credentials\fR are used. .IP \(bu 4 \fB\fCfallthrough\fR If zone matches and no record can be generated, pass request to the next plugin. -If \fB[ZONES...]\fP is omitted, then fallthrough happens for all zones for which the plugin is +If \fBZONES\fP is omitted, then fallthrough happens for all zones for which the plugin is authoritative. If specific zones are listed (for example \fB\fCin-addr.arpa\fR and \fB\fCip6.arpa\fR), then only queries for those zones will be subject to fallthrough. .IP \(bu 4 -\fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block +\fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration +block. +.IP \(bu 4 +\fB\fCrefresh\fR can be used to control how long between record retrievals from Route 53. It requires +a duration string as a parameter to specify the duration between update cycles. Each update +cycle may result in many AWS API calls depending on how many domains use this plugin and how +many records are in each. 
Adjusting the update frequency may help reduce the potential of API
+rate-limiting imposed by AWS.
+.IP \(bu 4
+\fBDURATION\fP A duration string. Defaults to \fB\fC1m\fR. If units are unspecified, seconds are assumed.

.SH "EXAMPLES"
.PP
-Enable route53 with implicit AWS credentials and and resolve CNAMEs via 10.0.0.1:
+Enable route53 with implicit AWS credentials and resolve CNAMEs via 10.0.0.1:

.PP
.RS

.nf
+example.org {
+    route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7
+}
+
\&. {
-    route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7
    forward . 10.0.0.1
}

@@ -78,7 +91,7 @@ Enable route53 with explicit AWS credentials:
.RS

.nf
-\&. {
+example.org {
    route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 {
      aws\_access\_key AWS\_ACCESS\_KEY\_ID AWS\_SECRET\_ACCESS\_KEY
    }
@@ -110,10 +123,26 @@ Enable route53 with multiple hosted zones with the same domain:
.RS

.nf
-\&. {
+example.org {
    route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 example.org.:Z93A52145678156
}

.fi
.RE

+.PP
+Enable route53 and refresh records every 3 minutes:
+
+.PP
+.RS
+
+.nf
+example.org {
+    route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 {
+        refresh 3m
+    }
+}
+
+.fi
+.RE
+
diff --git a/man/coredns-secondary.7 b/man/coredns-secondary.7
index b3f6ac3627e..b1ce5afbcd7 100644
--- a/man/coredns-secondary.7
+++ b/man/coredns-secondary.7
@@ -1,5 +1,5 @@
-.\" Generated by Mmark Markdown Processer - mmark.nl
-.TH "COREDNS-SECONDARY" 7 "July 2019" "CoreDNS" "CoreDNS Plugins"
+.\" Generated by Mmark Markdown Processer - mmark.miek.nl
+.TH "COREDNS-SECONDARY" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"

.SH "NAME"
.PP
diff --git a/man/coredns-sign.7 b/man/coredns-sign.7
new file mode 100644
index 00000000000..7d780b60ba4
--- /dev/null
+++ b/man/coredns-sign.7
@@ -0,0 +1,228 @@
+.\" Generated by Mmark Markdown Processer - mmark.miek.nl
+.TH "COREDNS-SIGN" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+
+.SH "NAME"
+.PP
+\fIsign\fP - adds DNSSEC records to zone files.
+
+.SH "DESCRIPTION"
+.PP
+The \fIsign\fP plugin is used to sign (see RFC 6781) zones. In this process DNSSEC resource records are
+added. The signatures that sign the resource record sets have an expiration date; this means the
+signing process must be repeated before this expiration date is reached. Otherwise the zone's data
+will go BAD (RFC 4035, Section 5.5). The \fIsign\fP plugin takes care of this.
+
+.PP
+Only NSEC is supported; \fIsign\fP does not support NSEC3.
+
+.PP
+\fISign\fP works in conjunction with the \fIfile\fP and \fIauto\fP plugins; this plugin \fBsigns\fP the zone
+files, \fIauto\fP and \fIfile\fP \fBserve\fP the zone's \fIdata\fP.
+
+.PP
+For this plugin to work at least one Common Signing Key (see coredns-keygen(1)) is needed. This key
+(or keys) will be used to sign the entire zone. \fISign\fP does not support the ZSK/KSK split, nor will
+it do key or algorithm rollovers - it just signs.
+
+.PP
+\fISign\fP will:
+
+.IP \(bu 4
+(Re)-sign the zone with the CSK(s) when:
+
+.RS
+.IP \(en 4
+the last time it was signed is more than 6 days ago. Each zone will have some jitter
+applied to the inception date.
+.IP \(en 4
+the signature only has 14 days left before expiring.
+
+.RE
+
+
+Both these dates are only checked on the SOA's signature(s).
+.IP \(bu 4
+Create RRSIGs that have an inception of -3 hours (minus a jitter between 0 and 18 hours)
+and an expiration of +32 days for every given DNSKEY.
+.IP \(bu 4
+Add NSEC records for all names in the zone. The TTL for these is the negative cache TTL from the
+SOA record.
+.IP \(bu 4 +Add or replace \fIall\fP apex CDS/CDNSKEY records with the ones derived from the given keys. For +each key two CDS are created one with SHA1 and another with SHA256. +.IP \(bu 4 +Update the SOA's serial number to the \fIUnix epoch\fP of when the signing happens. This will +overwrite \fIany\fP previous serial number. + + +.PP +There are two ways that dictate when a zone is signed. Normally every 6 days (plus jitter) it will +be resigned. If for some reason we fail this check, the 14 days before expiring kicks in. + +.PP +Keys are named (following BIND9): \fB\fCK++.key\fR and \fB\fCK++.private\fR. +The keys \fBmust not\fP be included in your zone; they will be added by \fIsign\fP. These keys can be +generated with \fB\fCcoredns-keygen\fR or BIND9's \fB\fCdnssec-keygen\fR. You don't have to adhere to this naming +scheme, but then you need to name your keys explicitly, see the \fB\fCkeys file\fR directive. + +.PP +A generated zone is written out in a file named \fB\fCdb..signed\fR in the directory named by the +\fB\fCdirectory\fR directive (which defaults to \fB\fC/var/lib/coredns\fR). + +.SH "SYNTAX" +.PP +.RS + +.nf +sign DBFILE [ZONES...] { + key file|directory KEY...|DIR... + directory DIR +} + +.fi +.RE + +.IP \(bu 4 +\fBDBFILE\fP the zone database file to read and parse. If the path is relative, the path from the +\fIroot\fP plugin will be prepended to it. +.IP \(bu 4 +\fBZONES\fP zones it should be sign for. If empty, the zones from the configuration block are +used. +.IP \(bu 4 +\fB\fCkey\fR specifies the key(s) (there can be multiple) to sign the zone. If \fB\fCfile\fR is +used the \fBKEY\fP's filenames are used as is. If \fB\fCdirectory\fR is used, \fIsign\fP will look in \fBDIR\fP +for \fB\fCK++\fR files. Any metadata in these files (Activate, Publish, etc.) is +\fIignored\fP. These keys must also be Key Signing Keys (KSK). +.IP \(bu 4 +\fB\fCdirectory\fR specifies the \fBDIR\fP where CoreDNS should save zones that have been signed. +If not given this defaults to \fB\fC/var/lib/coredns\fR. The zones are saved under the name +\fB\fCdb..signed\fR. If the path is relative the path from the \fIroot\fP plugin will be prepended +to it. + + +.PP +Keys can be generated with \fB\fCcoredns-keygen\fR, to create one for use in the \fIsign\fP plugin, use: +\fB\fCcoredns-keygen example.org\fR or \fB\fCdnssec-keygen -a ECDSAP256SHA256 -f KSK example.org\fR. + +.SH "EXAMPLES" +.PP +Sign the \fB\fCexample.org\fR zone contained in the file \fB\fCdb.example.org\fR and write the result to +\fB\fC./db.example.org.signed\fR to let the \fIfile\fP plugin pick it up and serve it. The keys used +are read from \fB\fC/etc/coredns/keys/Kexample.org.key\fR and \fB\fC/etc/coredns/keys/Kexample.org.private\fR. + +.PP +.RS + +.nf +example.org { + file db.example.org.signed + + sign db.example.org { + key file /etc/coredns/keys/Kexample.org + directory . + } +} + +.fi +.RE + +.PP +Running this leads to the following log output (note the timers in this example have been set to +shorter intervals). + +.PP +.RS + +.nf +[WARNING] plugin/file: Failed to open "open /tmp/db.example.org.signed: no such file or directory": trying again in 1m0s +[INFO] plugin/sign: Signing "example.org." because open /tmp/db.example.org.signed: no such file or directory +[INFO] plugin/sign: Successfully signed zone "example.org." in "/tmp/db.example.org.signed" with key tags "59725" and 1564766865 SOA serial, elapsed 9.357933ms, next: 2019\-08\-02T22:27:45.270Z +[INFO] plugin/file: Successfully reloaded zone "example.org." 
in "/tmp/db.example.org.signed" with serial 1564766865 + +.fi +.RE + +.PP +Or use a single zone file for \fImultiple\fP zones, note that the \fBZONES\fP are repeated for both plugins. +Also note this outputs \fImultiple\fP signed output files. Here we use the default output directory +\fB\fC/var/lib/coredns\fR. + +.PP +.RS + +.nf +\&. { + file /var/lib/coredns/db.example.org.signed example.org + file /var/lib/coredns/db.example.net.signed example.net + sign db.example.org example.org example.net { + key directory /etc/coredns/keys + } +} + +.fi +.RE + +.PP +This is the same configuration, but the zones are put in the server block, but note that you still +need to specify what file is served for what zone in the \fIfile\fP plugin: + +.PP +.RS + +.nf +example.org example.net { + file var/lib/coredns/db.example.org.signed example.org + file var/lib/coredns/db.example.net.signed example.net + sign db.example.org { + key directory /etc/coredns/keys + } +} + +.fi +.RE + +.PP +Be careful to fully list the origins you want to sign, if you don't: + +.PP +.RS + +.nf +example.org example.net { + sign plugin/sign/testdata/db.example.org miek.org { + key file /etc/coredns/keys/Kexample.org + } +} + +.fi +.RE + +.PP +This will lead to \fB\fCdb.example.org\fR be signed \fItwice\fP, as this entire section is parsed twice because +you have specified the origins \fB\fCexample.org\fR and \fB\fCexample.net\fR in the server block. + +.PP +Forcibly resigning a zone can be accomplished by removing the signed zone file (CoreDNS will keep +on serving it from memory), and sending SIGUSR1 to the process to make it reload and resign the zone +file. + +.SH "ALSO SEE" +.PP +The DNSSEC RFCs: RFC 4033, RFC 4034 and RFC 4035. And the BCP on DNSSEC, RFC 6781. Further more the +manual pages coredns-keygen(1) and dnssec-keygen(8). And the \fIfile\fP plugin's documentation. + +.PP +Coredns-keygen can be found at +https://github.com/coredns/coredns-utils +\[la]https://github.com/coredns/coredns-utils\[ra] in the +coredns-keygen directory. + +.PP +Other useful DNSSEC tools can be found in ldns +\[la]https://nlnetlabs.nl/projects/ldns/about/\[ra], e.g. +\fB\fCldns-key2ds\fR to create DS records from DNSKEYs. + +.SH "BUGS" +.PP +\fB\fCkeys directory\fR is not implemented. + diff --git a/man/coredns-template.7 b/man/coredns-template.7 index c79cee38fb3..9f18b749629 100644 --- a/man/coredns-template.7 +++ b/man/coredns-template.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-TEMPLATE" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-TEMPLATE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -93,7 +93,7 @@ The output of the template must be a RFC 1035 .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metrics are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: .IP \(bu 4 \fB\fCcoredns_template_matches_total{server, regex}\fR the total number of matched requests by regex. 
diff --git a/man/coredns-tls.7 b/man/coredns-tls.7 index 5674afca65b..ef72fb1d98c 100644 --- a/man/coredns-tls.7 +++ b/man/coredns-tls.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-TLS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-TLS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -14,7 +14,7 @@ all (DNSSEC only signs resource records). .PP The \fItls\fP "plugin" allows you to configure the cryptographic keys that are needed for both -DNS-over-TLS and DNS-over-gRPC. If the \fB\fCtls\fR directive is omitted, then no encryption takes place. +DNS-over-TLS and DNS-over-gRPC. If the \fItls\fP plugin is omitted, then no encryption takes place. .PP The gRPC protobuffer is defined in \fB\fCpb/dns.proto\fR. It defines the proto as a simple wrapper for the @@ -60,8 +60,8 @@ nameservers defined in \fB\fC/etc/resolv.conf\fR to resolve the query. This prox .nf tls://.:5553 { - tls cert.pem key.pem ca.pem - forward . /etc/resolv.conf + tls cert.pem key.pem ca.pem + forward . /etc/resolv.conf } .fi @@ -76,8 +76,8 @@ incoming queries. .nf grpc://. { - tls cert.pem key.pem ca.pem - forward . /etc/resolv.conf + tls cert.pem key.pem ca.pem + forward . /etc/resolv.conf } .fi diff --git a/man/coredns-trace.7 b/man/coredns-trace.7 index 265b43069e8..bf58f3e9e1a 100644 --- a/man/coredns-trace.7 +++ b/man/coredns-trace.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-TRACE" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-TRACE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -41,9 +41,9 @@ Additional features can be enabled with this syntax: .nf trace [ENDPOINT\-TYPE] [ENDPOINT] { - every AMOUNT - service NAME - client\_server + every AMOUNT + service NAME + client\_server } .fi @@ -132,9 +132,9 @@ Trace one query every 10000 queries, rename the service, and enable same span: .nf trace tracinghost:9411 { - every 10000 - service dnsproxy - client\_server + every 10000 + service dnsproxy + client\_server } .fi diff --git a/man/coredns-transfer.7 b/man/coredns-transfer.7 new file mode 100644 index 00000000000..6bb47364269 --- /dev/null +++ b/man/coredns-transfer.7 @@ -0,0 +1,41 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-TRANSFER" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" + +.SH "NAME" +.PP +\fItransfer\fP - perform zone transfers for other plugins. + +.SH "DESCRIPTION" +.PP +This plugin answers zone transfers for authoritative plugins that implement +\fB\fCtransfer.Transferer\fR. + +.PP +Transfer answers full zone transfer (AXFR) requests and incremental zone transfer (IXFR) requests +with AXFR fallback if the zone has changed. + +.PP +Notifies are not currently supported. + +.SH "SYNTAX" +.PP +.RS + +.nf +transfer [ZONE...] { + to HOST... +} + +.fi +.RE + +.IP \(bu 4 +\fBZONES\fP The zones \fItransfer\fP will answer zone requests for. If left blank, +the zones are inherited from the enclosing server block. To answer zone +transfers for a given zone, there must be another plugin in the same server +block that serves the same zone, and implements \fB\fCtransfer.Transferer\fR. +.IP \(bu 4 +\fB\fCto\fR \fBHOST...\fP The hosts \fItransfer\fP will transfer to. Use \fB\fC*\fR to permit +transfers to all hosts. 
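+.PP
+A sketch of typical usage; it assumes another plugin in the same server block (here \fIfile\fP, once
+it implements \fB\fCtransfer.Transferer\fR) serves the zone being transferred, and the zone file
+name is illustrative:
+
+.PP
+.RS
+
+.nf
+example.org {
+    file db.example.org
+    transfer example.org {
+        to *
+    }
+}
+
+.fi
+.RE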
+ + diff --git a/man/coredns-whoami.7 b/man/coredns-whoami.7 index 6927005736d..860a4230c64 100644 --- a/man/coredns-whoami.7 +++ b/man/coredns-whoami.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-WHOAMI" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-WHOAMI" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -50,7 +50,7 @@ Start a server on the default port and load the \fIwhoami\fP plugin. .RS .nf -\&. { +example.org { whoami } diff --git a/man/coredns.1 b/man/coredns.1 index 2e4824ce1c5..022c9da8be8 100644 --- a/man/coredns.1 +++ b/man/coredns.1 @@ -1,9 +1,9 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS" 1 "April 2019" "CoreDNS" "CoreDNS" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS" 1 "October 2019" "CoreDNS" "CoreDNS" .SH "COREDNS" .PP -\fIcoredns\fP - plugable DNS nameserver optimized for service discovery and flexibility. +\fIcoredns\fP - pluggable DNS nameserver optimized for service discovery and flexibility. .SH "SYNOPSIS" .PP @@ -32,9 +32,6 @@ Available options: specify Corefile to load, if not given CoreDNS will look for a \fB\fCCorefile\fR in the current directory. .TP -\fB-cpu\fP \fBCAP\fP -specify maximum CPU capacity in percent. -.TP \fB-dns.port\fP \fBPORT\fP override default port (53) to listen on. .TP @@ -61,5 +58,5 @@ Apache License 2.0 .SH "SEE ALSO" .PP -Corefile(5) coredns-k8s_external(7) coredns-erratic(7) coredns-nsid(7) coredns-hosts(7) coredns-dnssec(7) coredns-health(7) coredns-grpc(7) coredns-ready(7) coredns-file(7) coredns-root(7) coredns-autopath(7) coredns-auto(7) coredns-dnstap(7) coredns-pprof(7) coredns-tls(7) coredns-loadbalance(7) coredns-cache(7) coredns-whoami(7) coredns-bind(7) coredns-loop(7) coredns-import(7) coredns-chaos(7) coredns-template(7) coredns-log(7) coredns-kubernetes(7) coredns-forward(7) coredns-debug(7) coredns-secondary(7) coredns-route53(7) coredns-errors(7) coredns-metrics(7) coredns-reload(7) coredns-rewrite(7) coredns-metadata(7) coredns-federation(7) coredns-etcd(7) coredns-cancel(7) coredns-trace(7). +Corefile(5) coredns-k8s_external(7) coredns-erratic(7) coredns-nsid(7) coredns-any(7) coredns-hosts(7) coredns-acl(7) coredns-dnssec(7) coredns-health(7) coredns-grpc(7) coredns-sign(7) coredns-file(7) coredns-root(7) coredns-autopath(7) coredns-auto(7) coredns-clouddns(7) coredns-dnstap(7) coredns-pprof(7) coredns-tls(7) coredns-loadbalance(7) coredns-cache(7) coredns-ready(7) coredns-whoami(7) coredns-bind(7) coredns-loop(7) coredns-import(7) coredns-chaos(7) coredns-template(7) coredns-azure(7) coredns-log(7) coredns-kubernetes(7) coredns-forward(7) coredns-debug(7) coredns-secondary(7) coredns-route53(7) coredns-errors(7) coredns-metrics(7) coredns-reload(7) coredns-rewrite(7) coredns-metadata(7) coredns-etcd(7) coredns-cancel(7) coredns-trace(7). diff --git a/notes/coredns-002.md b/notes/coredns-002.md index 0a326750729..660da854d24 100644 --- a/notes/coredns-002.md +++ b/notes/coredns-002.md @@ -77,7 +77,7 @@ CoreDNS is a DNS server that chains plugins, where each plugin implements a DNS * plugin/dnssec: replaced go-cache with golang-lru in dnssec. Also adds a `cache_capacity`. option in dnssec plugin so that the capacity of the LRU cache could be specified in the config file. 
-* plugin/logging: allow a response classs to be specified on log on responses matching the name *and* +* plugin/logging: allow a response class to be specified on log on responses matching the name *and* the response class. For instance only log denials for example.com: ~~~ corefile diff --git a/notes/coredns-003.md b/notes/coredns-003.md index c1656e7a1c0..ce43b6842cb 100644 --- a/notes/coredns-003.md +++ b/notes/coredns-003.md @@ -28,7 +28,7 @@ Refused queries are properly logged and exported if metrics are enabled. * *auto*: numerous bugfixes. * *file*: fix data race in reload process and also reload a zone when it is `mv`ed (newly created) into place. Also rewrite the zone lookup algorithm and be more standards compliant, esp. in the area of DNSSEC, wildcards and empty-non-terminals; handle secure delegations. -* *kubernetes*: vender the k8s dependency and updates to be compatible with Kubernetes 1.4 and 1.5. +* *kubernetes*: vendor the k8s dependency and updates to be compatible with Kubernetes 1.4 and 1.5. Multiple cleanups and fixes. Kubernetes services can now be resolved. # Contributors diff --git a/notes/coredns-004.md b/notes/coredns-004.md index b19b8b14258..5360d13597f 100644 --- a/notes/coredns-004.md +++ b/notes/coredns-004.md @@ -24,7 +24,7 @@ We are now also releasing an ARM build that can run on Raspberry Pi. * *file|auto*: include zone's NSset in positive responses. * *auto*: close files and don't leak file descriptors. * *httpproxy*: new plugin that proxies to and resolves your requests over an encrypted connection. This plugin will probably be morphed into proxy at some point in the feature. Consider it experimental for the time being. -* *metrics*: `reponse_size_bytes` and `request_size_bytes` export the actual length of the packet, not the advertised bufsize. +* *metrics*: `response_size_bytes` and `request_size_bytes` export the actual length of the packet, not the advertised bufsize. * *log*: `{size}` is now the length in bytes of the request, `{rsize}` for the reply. Default logging is changed to show both. # Contributors diff --git a/notes/coredns-006.md b/notes/coredns-006.md index be0b1289c23..981949f0ff5 100644 --- a/notes/coredns-006.md +++ b/notes/coredns-006.md @@ -29,7 +29,7 @@ Fixed: ### New -* *reverse* plugin: allows CoreDNS to respond dynamicly to an PTR request and the related +* *reverse* plugin: allows CoreDNS to respond dynamically to an PTR request and the related A/AAAA request. ### Improvements/changes diff --git a/notes/coredns-007.md b/notes/coredns-007.md index f7ea3b45d43..d723c5fef6c 100644 --- a/notes/coredns-007.md +++ b/notes/coredns-007.md @@ -30,7 +30,7 @@ Back to the release. * gRPC and TLS are made first class citizens. See [the zone specification](https://github.com/coredns/coredns/blob/master/README.md#zone-specification) on how to use it. TL;DR using `grpc://` makes the server talk gRPC. The `tls` directive is used to - specify TLS certifcates. + specify TLS certificates. * Zipkin tracing can be enabled for all plugin. # Plugins diff --git a/notes/coredns-1.0.1.md b/notes/coredns-1.0.1.md index 216afd46e34..e3cb5589893 100644 --- a/notes/coredns-1.0.1.md +++ b/notes/coredns-1.0.1.md @@ -16,7 +16,7 @@ One new plugin was added: *nsid*, that implements [RFC 5001](https://tools.ietf. ## Plugins * *file* fixes a crash when an request with a DO bit (pretty much the default) hits an unsigned zone. The default configuration should recover the go-routine, but this is nonetheless serious. 
*file* received some other fixes when returning (secure) delegations. * *dnstap* plugin is now 50% faster. -* *metrics* fixed the start time bucket for duration. +* *metrics* fixed the start time bucket for the duration. ## Contributors diff --git a/notes/coredns-1.0.2.md b/notes/coredns-1.0.2.md index 4dbd3c1bf6c..413a660809c 100644 --- a/notes/coredns-1.0.2.md +++ b/notes/coredns-1.0.2.md @@ -9,7 +9,7 @@ author = "coredns" +++ We are pleased to announce the [release](https://github.com/coredns/coredns/releases/tag/v1.0.2) of CoreDNS-1.0.2! -This release can be summerized as "help external plugin developers" as most changes are geared +This release can be summarized as "help external plugin developers" as most changes are geared towards exposing CoreDNS functionality to make this as easy as possible. Is also a fairly small release. diff --git a/notes/coredns-1.2.6.md b/notes/coredns-1.2.6.md index f8fbdeec978..1e383013784 100644 --- a/notes/coredns-1.2.6.md +++ b/notes/coredns-1.2.6.md @@ -27,7 +27,7 @@ kernels. * [*cache*](/plugins/cache) got some minor optimizations. -* [*errors*](/plugins/errors) (and *log*) gotten a new option (`consolidate`) to suppress loging. +* [*errors*](/plugins/errors) (and *log*) gotten a new option (`consolidate`) to suppress logging. * [*hosts*](/plugins/hosts) will now read the `hosts` file without holding a write lock. diff --git a/notes/coredns-1.3.1.md b/notes/coredns-1.3.1.md index e129bdfdfc7..acab8bd949a 100644 --- a/notes/coredns-1.3.1.md +++ b/notes/coredns-1.3.1.md @@ -26,7 +26,7 @@ Mostly documentation updates in various plugins. Plus a small fix where we stop * [*log*](/plugins/log) now allows multiple names to be specified. - * [*import*](/plugins/import) was added to give it a README.md to make it's documentation more + * [*import*](/plugins/import) was added to give it a README.md to make its documentation more discoverable. * [*kubernetes*](/plugins/kubernetes) `TTL` is also applied to negative responses (NXDOMAIN, etc). diff --git a/notes/coredns-1.5.2.md b/notes/coredns-1.5.2.md index d6224a0b578..5e5c3ff253a 100644 --- a/notes/coredns-1.5.2.md +++ b/notes/coredns-1.5.2.md @@ -10,8 +10,9 @@ author = "coredns" The CoreDNS team has released [CoreDNS-1.5.2](https://github.com/coredns/coredns/releases/tag/v1.5.2). -Small bugfixes and a change to Caddy's important path. - +Small bugfixes and a change to Caddy's import path (mholt/caddy -> caddyserver/caddy). Doing +a release helps plugins deal with the change better. + # Plugins * For all plugins that use the `upstream` directive it use removed from the documentation; it's still accepted @@ -31,7 +32,7 @@ Yong Tang. ## Noteworthy Changes -* plugin/file: close correctlty after AXFR (https://github.com/coredns/coredns/pull/2943) +* plugin/file: close correctly after AXFR (https://github.com/coredns/coredns/pull/2943) * plugin/file: load secondary zones lazily on startup (https://github.com/coredns/coredns/pull/2944) * plugin/template: support metadata (https://github.com/coredns/coredns/pull/2958) * build: Update Caddy to 1.0.1, and update import path (https://github.com/coredns/coredns/pull/2961) diff --git a/notes/coredns-1.6.0.md b/notes/coredns-1.6.0.md new file mode 100644 index 00000000000..f5414fccebd --- /dev/null +++ b/notes/coredns-1.6.0.md @@ -0,0 +1,62 @@ ++++ +title = "CoreDNS-1.6.0 Release" +description = "CoreDNS-1.6.0 Release Notes." 
+tags = ["Release", "1.6.0", "Notes"] +release = "1.6.0" +date = 2019-07-28T14:35:47+01:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.0](https://github.com/coredns/coredns/releases/tag/v1.6.0). + +The `-cpu` flag is removed from this version. + +This release sports changes in the *file* plugin. A speed up in the *log* plugin and fixes in the +*cache* and *hosts* plugins. + +Upcoming deprecation: the kubernetes *federation* plugin [will be moved +to](https://github.com/coredns/coredns/issues/3041) github.com/coredns/federation. This is likely to +happen in CoreDNS 1.7.0. + +# Plugins + +* The [*file*](/plugins/file) got lot of bug fixes and it now loads zones lazily on start, i.e. if the zonefile + does not exist, it keeps trying with every reload period. +* The [*cache*](/plugins/cache) fixes a race. +* Multiple fixes in the [*route53*](/plugins/route53) plugin. +* And the [*kubernetes*](/plugins/kubernetes) removes the `resyncperiod` option. +* The [*host*](/plugins/host) appended entries from /etc/hosts on each (re-)parse, instead of + overwriting them. +* Speed ups in the [*log*](/plugins/log) plugin. + +## Brought to You By + +Anshul Sharma, +Charlie Vieth, +Chris O'Haver, +Christian Muehlhaeuser, +Erfan Besharat, +Jintao Zhang, +Mat Lowery, +Miek Gieben, +Ruslan Drozhdzh, +Yong Tang. + +## Noteworthy Changes + +* core: Scrub: TC bit is always set (https://github.com/coredns/coredns/pull/3001) +* pkg/cache: Fix race in Add() and Evict() (https://github.com/coredns/coredns/pull/3013) +* pkg/replacer: Evaluate format once and improve perf by ~3x (https://github.com/coredns/coredns/pull/3002) +* plugin/file: Fix setting ReloadInterval (https://github.com/coredns/coredns/pull/3017) +* plugin/file: Make non-existent file non-fatal (https://github.com/coredns/coredns/pull/2955) +* plugin/file: New zone should have zero records (https://github.com/coredns/coredns/pull/3025) +* plugin/file: Rename do to walk and cleanup and document (https://github.com/coredns/coredns/pull/2987) +* plugin/file: Simplify locking (https://github.com/coredns/coredns/pull/3024) +* plugin/host: don't append the names when reparsing hosts file (https://github.com/coredns/coredns/pull/3045) +* plugin/kubernetes: Remove resyncperiod (https://github.com/coredns/coredns/pull/2923) +* plugin/log: Fix log plugin benchmark and slightly improve performance (https://github.com/coredns/coredns/pull/3004) +* plugin/metrics: Fix response_rcode_count_total metric (https://github.com/coredns/coredns/pull/3029) +* plugin/rewrite: Fix domain length validation (https://github.com/coredns/coredns/pull/2995) +* plugin/route53: Fix IAM credential file (https://github.com/coredns/coredns/pull/2983) +* plugin/route53: Fix multiple credentials in route53 (https://github.com/coredns/coredns/pull/2859) diff --git a/notes/coredns-1.6.1.md b/notes/coredns-1.6.1.md new file mode 100644 index 00000000000..76db2d11759 --- /dev/null +++ b/notes/coredns-1.6.1.md @@ -0,0 +1,37 @@ ++++ +title = "CoreDNS-1.6.1 Release" +description = "CoreDNS-1.6.1 Release Notes." +tags = ["Release", "1.6.1", "Notes"] +release = "1.6.1" +date = 2019-08-02T14:35:47+01:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.1](https://github.com/coredns/coredns/releases/tag/v1.6.1). + +This is a small (bug fix) release. 
+ +# Plugins + +* Fix a panic in the [*hosts*](/plugins/hosts) plugin. +* The [*reload*](/plugins/reload) now detects changes in files imported from the main Corefile. +* [*route53*](/plugins/route53) increases the paging size when talking to the AWS API, this + decreases the chances of getting throttled. + +## Brought to You By + +Alan, +AllenZMC, +dzzg, +Erik Wilson, +Matt Kulka, +Miek Gieben, +Yong Tang. + +## Noteworthy Changes + +core: log panics (https://github.com/coredns/coredns/pull/3072) +plugin/hosts: create inline map in setup (https://github.com/coredns/coredns/pull/3071) +plugin/reload: Graceful reload of imported files (https://github.com/coredns/coredns/pull/3068) +plugin/route53: Increase ListResourceRecordSets paging size. (https://github.com/coredns/coredns/pull/3073) diff --git a/notes/coredns-1.6.2.md b/notes/coredns-1.6.2.md new file mode 100644 index 00000000000..f942de7b3fe --- /dev/null +++ b/notes/coredns-1.6.2.md @@ -0,0 +1,41 @@ ++++ +title = "CoreDNS-1.6.2 Release" +description = "CoreDNS-1.6.2 Release Notes." +tags = ["Release", "1.6.2", "Notes"] +release = "1.6.2" +date = 2019-08-13T14:35:47+01:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.2](https://github.com/coredns/coredns/releases/tag/v1.6.2). + +This is a bug fix release, but it also features a new plugin called [*azure*](/plugins/azure). + +It's compiled with Go 1.12.8 that incorporates fixes for HTTP/2 that may impact you if you use +[DoH](https://tools.ietf.org/html/rfc8484). + +# Plugins + +* Add [*azure*](/plugins/azure) to facilitate serving records from Microsoft Azure. +* Make the refresh frequency adjustable in the [*route53*](/plugins/route53) plugin. +* Fix the handling of truncated responses in [*forward*](/plugins/forward). + +## Brought to You By + +Andrey Meshkov, +Chris O'Haver, +Darshan Chaudhary, +ethan, +Matt Kulka +and +Miek Gieben. + +## Noteworthy Changes + +* plugin/azure: Add plugin for Azure DNS (https://github.com/coredns/coredns/pull/2945) +* plugin/forward: Fix handling truncated responses in forward (https://github.com/coredns/coredns/pull/3110) +* plugin/kubernetes: Don't do a zone transfer for NS requests (https://github.com/coredns/coredns/pull/3098) +* plugin/kubernetes: fix NXDOMAIN/NODATA fallthough case (https://github.com/coredns/coredns/pull/3118) +* plugin/route53: make refresh frequency adjustable (https://github.com/coredns/coredns/pull/3083) +* plugin/route53: Various updates (https://github.com/coredns/coredns/pull/3108) diff --git a/notes/coredns-1.6.3.md b/notes/coredns-1.6.3.md new file mode 100644 index 00000000000..10a918277b4 --- /dev/null +++ b/notes/coredns-1.6.3.md @@ -0,0 +1,66 @@ ++++ +title = "CoreDNS-1.6.3 Release" +description = "CoreDNS-1.6.3 Release Notes." +tags = ["Release", "1.6.3", "Notes"] +release = "1.6.3" +date = 2019-08-31T07:30:47+00:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.3](https://github.com/coredns/coredns/releases/tag/v1.6.3). + +In this release we have moved the *federation* plugin to +[github.com/coredns/federation](https://github.com/coredns/federation), but it is still fully +functional in this version. In version 1.7.0 we expect to deprecate it. + +Further more a slew a spelling corrections, and other minor improvements and polish. **And** two new +plugins: *clouddns* and *sign*. 
+ +# Plugins + +* [*clouddns*](/plugins/clouddns) to enable serving zone data from GCP Cloud DNS. +* [*sign*](/plugins/sign) that (DNSSEC) signs your zonefiles (in its most basic form). +* [*file*](/plugins/file) various update, plug a memory leak when doing zone transfers, among other + things. + +We've removed the time stamping from `pkg/log` as timestamps are *also* added by the logging +aggregators, like `journald` or inside Kubernetes. And a small ASCII art logo is now printed when +CoreDNS starts up. + +## Brought to You By + +AllenZMC, +Chris Aniszczyk, +Chris O'Haver, +Cricket Liu, +Guangming Wang, +Julien Garcia Gonzalez, +li mengyang, +Miek Gieben, +Muhammad Falak R Wani, +Palash Nigam, +Sakura, +wwgfhf, +xieyanker, +Xigang Wang, +Yevgeny Pats, +Yong Tang, +zhangguoyan, +陈谭军. + + +## Noteworthy Changes + +* fuzzing: Add Continuous Fuzzing Integration to Fuzzit (https://github.com/coredns/coredns/pull/3093) +* pkg/log: remove timestamp (https://github.com/coredns/coredns/pull/3218) +* plugin/clouddns: Add Google Cloud DNS plugin (https://github.com/coredns/coredns/pull/3011) +* plugin/federation: Move federation plugin to github.com/coredns/federation (https://github.com/coredns/coredns/pull/3139) +* plugin/file: close reader for reload (https://github.com/coredns/coredns/pull/3196) +* plugin/file: less notify logging spam (https://github.com/coredns/coredns/pull/3212) +* plugin/file: respond correctly to IXFR message (https://github.com/coredns/coredns/pull/3177) +* plugin/file: rework outgoing axfr (https://github.com/coredns/coredns/pull/3227) +* plugin/{health,ready}: return standardized text for ready and health endpoint (https://github.com/coredns/coredns/pull/3195) +* plugin/k8s_external handle NS records (https://github.com/coredns/coredns/pull/3160) +* plugin/kubernetes: handle NS records (https://github.com/coredns/coredns/pull/3160) +* startup: add logo (https://github.com/coredns/coredns/pull/3230) diff --git a/notes/coredns-1.6.4.md b/notes/coredns-1.6.4.md new file mode 100644 index 00000000000..3a8a4bc4d1e --- /dev/null +++ b/notes/coredns-1.6.4.md @@ -0,0 +1,41 @@ ++++ +title = "CoreDNS-1.6.4 Release" +description = "CoreDNS-1.6.4 Release Notes." +tags = ["Release", "1.6.4", "Notes"] +release = "1.6.4" +date = 2019-09-27T10:00:00+00:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.4](https://github.com/coredns/coredns/releases/tag/v1.6.4). + +Various code cleanups and documentation improvements. We've added one new plugin: *acl*, that allows +blocking requests. + +# Plugins + +* [*acl*](/plugins/acl) block request from IPs or IP ranges. +* [*kubernetes*](/plugins/kubernetes) received some bug fixes, see below for specific PRs. +* [*hosts*](/plugins/hosts) exports metrics on the number of entries and last reload time. + +## Brought to You By + +An Xiao, +Chris O'Haver, +Cricket Liu, +Guangming Wang, +Kasisnu, +li mengyang, +Miek Gieben, +orangelynx, +xieyanker, +yeya24, +Yong Tang. 
+ +## Noteworthy Changes + +* plugin/hosts: add host metrics (https://github.com/coredns/coredns/pull/3277) +* plugin/kubernetes: Don't duplicate service record for every port (https://github.com/coredns/coredns/pull/3240) +* plugin/kubernetes: Handle multiple local IPs and bind (https://github.com/coredns/coredns/pull/3208) +* Add plugin ACL for source IP filtering (https://github.com/coredns/coredns/pull/3103) diff --git a/notes/coredns-1.6.5.md b/notes/coredns-1.6.5.md new file mode 100644 index 00000000000..bb426a99cc2 --- /dev/null +++ b/notes/coredns-1.6.5.md @@ -0,0 +1,52 @@ ++++ +title = "CoreDNS-1.6.5 Release" +description = "CoreDNS-1.6.5 Release Notes." +tags = ["Release", "1.6.5", "Notes"] +release = "1.6.5" +date = 2019-11-06T10:00:00+00:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.5](https://github.com/coredns/coredns/releases/tag/v1.6.5). + +A fairly small release that polishes various plugins and fixes a bunch of bugs. + +# Plugins + +A new plugin [*transfer*](/plugins/transfer) that encapsulates the zone transfer knowledge and code +in one place. This makes it easier to add this functionality to new plugins. Plugins that already +implement zone transfers are expected to move to it in the 1.7.0 release. + +* [*forward*](/plugins/forward) don't block on returning sockets; instead timeout and drop the + socket on the floor, this makes each go-routine guarantee to exit. +* [*kubernetes*](/plugins/kubernetes) adds metrics to measure kubernetes control plane latency, see + documentation for details. +* [*file*](/plugins/file) fixes a panic when comparing domains names. + +## Brought to You By + +Chris O'Haver, +Erfan Besharat, +Hauke Löffler, +Ingo Gottwald, +janluk, +Miek Gieben, +Uladzimir Trehubenka, +Yong Tang, +yuxiaobo96. + +## Noteworthy Changes + +* core: Make request.Request smaller (https://github.com/coredns/coredns/pull/3351) +* pkg/log: Add Clear to stop debug logging (https://github.com/coredns/coredns/pull/3372) +* plugin/cache: move goroutine closure to separate function to save memory (https://github.com/coredns/coredns/pull/3353) +* plugin/clouddns: remove initialization from init (https://github.com/coredns/coredns/pull/3349) +* plugin/erratic: doc and zone transfer (https://github.com/coredns/coredns/pull/3340) +* plugin/file: fix panic in miekg/dns.CompareDomainName() (https://github.com/coredns/coredns/pull/3337) +* plugin/forward: make Yield not block (https://github.com/coredns/coredns/pull/3336) +* plugin/forward: Move map to array (https://github.com/coredns/coredns/pull/3339) +* plugin/kubernetes: Measure and expose DNS programming latency from Kubernetes plugin. (https://github.com/coredns/coredns/pull/3171) +* plugin/route53: Remove amazon initialization from init (https://github.com/coredns/coredns/pull/3348) +* plugin/transfer: Zone transfer plugin (https://github.com/coredns/coredns/pull/3223) +* plugins: Add MustNormalize (https://github.com/coredns/coredns/pull/3385) diff --git a/notes/coredns-1.6.6.md b/notes/coredns-1.6.6.md new file mode 100644 index 00000000000..d29816c13fe --- /dev/null +++ b/notes/coredns-1.6.6.md @@ -0,0 +1,50 @@ ++++ +title = "CoreDNS-1.6.6 Release" +description = "CoreDNS-1.6.6 Release Notes." 
+tags = ["Release", "1.6.6", "Notes"] +release = "1.6.6" +date = 2019-12-11T10:00:00+00:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.6](https://github.com/coredns/coredns/releases/tag/v1.6.6). + +A fairly small release that polishes various plugins and fixes a bunch of bugs. + + +# Security + +The github.com/miekg/dns has been updated to v1.1.25 to fix a DNS related security +vulnerability (https://github.com/miekg/dns/issues/1043). + +# Plugins + +A new plugin [*bufsize*](/plugin/bufsize) has been added that prevents IP fragmentation +for the DNS Flag Day 2020 and to deal with DNS vulnerabilities. + +* [*cache*](/plugin/cache) added a `serve_stale` option similar to `unbound`'s `serve_expired`. +* [*sign*](/plugin/sign) fix signing of authoritative data that we are not authoritative for. +* [*transfer*](/plugin/transfer) fixed calling wg.Add in main goroutine to avoid race conditons. + +## Brought to You By + +Chris O'Haver +Gonzalo Paniagua Javier +Guangming Wang +Kohei Yoshida +Miciah Dashiel Butler Masters +Miek Gieben +Yong Tang +Zou Nengren + +## Noteworthy Changes + +* plugin/bufsize: A new bufsize plugin to prevent IP fragmentation and DNS Flag Day 2020 (https://github.com/coredns/coredns/pull/3401) +* plugin/transfer: Fixed calling wg.Add in main goroutine to avoid race conditions (https://github.com/coredns/coredns/pull/3433) +* plugin/pprof: Fixed a reloading issue (https://github.com/coredns/coredns/pull/3454) +* plugin/health: Fixed a reloading issue (https://github.com/coredns/coredns/pull/3473) +* plugin/redy: Fixed a reloading issue (https://github.com/coredns/coredns/pull/3473) +* plugin/cache: Added a `serve_stale` option similar to `unbound`'s `serve_expired` (https://github.com/coredns/coredns/pull/3468) +* plugin/sign: Fix signing of authoritative data (https://github.com/coredns/coredns/pull/3479) +* pkg/reuseport: Move the core server listening functions to a new package (https://github.com/coredns/coredns/pull/3455) diff --git a/owners_generate.go b/owners_generate.go index e198f435f85..cc62082950e 100644 --- a/owners_generate.go +++ b/owners_generate.go @@ -5,59 +5,35 @@ package main import ( + "bufio" "fmt" "io/ioutil" "log" "os" - "path" - "path/filepath" "sort" - - "gopkg.in/yaml.v2" + "strings" ) func main() { - o := map[string]struct{}{} - // top-level OWNERS file - o, err := owners("OWNERS", o) + o, err := owners("CODEOWNERS") if err != nil { log.Fatal(err) } - // each of the plugins, in case someone is not in the top-level one - err = filepath.Walk("plugin", - func(p string, i os.FileInfo, err error) error { - if err != nil { - return err - } - if i.IsDir() { - return nil - } - if path.Base(p) != "OWNERS" { - return nil - } - o, err = owners(p, o) - return err - }) - - // sort it and format it - list := []string{} - for k := range o { - list = append(list, k) - } - sort.Strings(list) golist := `package chaos // Owners are all GitHub handlers of all maintainers. var Owners = []string{` c := ", " - for i, a := range list { - if i == len(list)-1 { + for i, a := range o { + if i == len(o)-1 { c = "}" } golist += fmt.Sprintf("%q%s", a, c) } + // to prevent `No newline at end of file` with gofmt + golist += "\n" if err := ioutil.WriteFile("plugin/chaos/zowners.go", []byte(golist), 0644); err != nil { log.Fatal(err) @@ -65,27 +41,50 @@ var Owners = []string{` return } -// owners parses a owner file without knowning a whole lot about its structure. 
-func owners(path string, owners map[string]struct{}) (map[string]struct{}, error) { - file, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - c := yaml.MapSlice{} - err = yaml.Unmarshal(file, &c) +func owners(path string) ([]string, error) { + // simple line, by line based format + // + // # In this example, @doctocat owns any files in the build/logs + // # directory at the root of the repository and any of its + // # subdirectories. + // /build/logs/ @doctocat + f, err := os.Open(path) if err != nil { return nil, err } - for _, mi := range c { - key, ok := mi.Key.(string) - if !ok { + scanner := bufio.NewScanner(f) + users := map[string]struct{}{} + for scanner.Scan() { + text := scanner.Text() + if len(text) == 0 { continue } - if key == "approvers" { - for _, k := range mi.Value.([]interface{}) { - owners[k.(string)] = struct{}{} + if text[0] == '#' { + continue + } + ele := strings.Fields(text) + if len(ele) == 0 { + continue + } + + // ok ele[0] is the path, the rest are (in our case) github usernames prefixed with @ + for _, s := range ele[1:] { + if len(s) <= 1 { + continue } + users[s[1:]] = struct{}{} + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + u := []string{} + for k := range users { + if strings.HasPrefix(k, "@") { + k = k[1:] } + u = append(u, k) } - return owners, nil + sort.Strings(u) + return u, nil } diff --git a/plugin.cfg b/plugin.cfg index 68a7f4dcc80..740d9bbaf7d 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -1,20 +1,20 @@ -# Directives are registered in the order they should be -# executed. +# Directives are registered in the order they should be executed. # -# Ordering is VERY important. Every plugin will -# feel the effects of all other plugin below -# (after) them during a request, but they must not -# care what plugin above them are doing. +# Ordering is VERY important. Every plugin will feel the effects of all other +# plugin below (after) them during a request, but they must not care what plugin +# above them are doing. -# How to rebuild with updated plugin configurations: -# Modify the list below and run `go gen && go build` +# How to rebuild with updated plugin configurations: Modify the list below and +# run `go generate && go build` -# The parser takes the input format of +# The parser takes the input format of: +# # : # Or # : # # External plugin example: +# # log:github.com/coredns/coredns/plugin/log # Local plugin example: # log:log @@ -24,6 +24,7 @@ cancel:cancel tls:tls reload:reload nsid:nsid +bufsize:bufsize root:root bind:bind debug:debug @@ -35,6 +36,7 @@ prometheus:metrics errors:errors log:log dnstap:dnstap +acl:acl any:any chaos:chaos loadbalance:loadbalance @@ -44,9 +46,12 @@ rewrite:rewrite dnssec:dnssec autopath:autopath template:template +transfer:transfer hosts:hosts route53:route53 -federation:federation +azure:azure +clouddns:clouddns +federation:github.com/coredns/federation k8s_external:k8s_external kubernetes:kubernetes file:file @@ -59,3 +64,4 @@ grpc:grpc erratic:erratic whoami:whoami on:github.com/caddyserver/caddy/onevent +sign:sign diff --git a/plugin.md b/plugin.md index 7b7655022b9..b85a4c28a9e 100644 --- a/plugin.md +++ b/plugin.md @@ -64,6 +64,11 @@ a *Metrics* section detailing the metrics. If the plugin supports signalling readiness it should have a *Ready* section detailing how it works, and implement the `ready.Readiness` interface. +## Opening Sockets + +See the plugin/pkg/reuseport for `Listen` and `ListenPacket` functions. 
Using these functions makes +your plugin handle reload events better. + ## Documentation Each plugin should have a README.md explaining what the plugin does and how it is configured. The @@ -117,6 +122,16 @@ The `fallthrough` directive should optionally accept a list of zones. Only queri in one of those zones should be allowed to fallthrough. See `plugin/pkg/fallthrough` for the implementation. +## General Guidelines + +Some general guidelines: + +* logging time duration should be done in seconds (call the `Seconds()` method on any duration). +* keep logging to a minimum. +* call the main config parse function just `parse`. +* try to minimize the number of knobs in the configuration. +* use `plugin.Error()` to wrap errors returned from the `setup` function. + ## Qualifying for Main Repo Plugins for CoreDNS can live out-of-tree, `plugin.cfg` defaults to CoreDNS' repo but other diff --git a/plugin/acl/README.md b/plugin/acl/README.md new file mode 100644 index 00000000000..9a964db2190 --- /dev/null +++ b/plugin/acl/README.md @@ -0,0 +1,68 @@ +# acl + +*acl* - enforces access control policies on source ip and prevents unauthorized access to DNS servers. + +## Description + +With `acl` enabled, users are able to block suspicious DNS queries by configuring IP filter rule sets, i.e. allowing authorized queries to recurse or blocking unauthorized queries. + +This plugin can be used multiple times per Server Block. + +## Syntax + +``` +acl [ZONES...] { + ACTION [type QTYPE...] [net SOURCE...] +} +``` + +- **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block are used. +- **ACTION** (*allow* or *block*) defines the way to deal with DNS queries matched by this rule. The default action is *allow*, which means a DNS query not matched by any rules will be allowed to recurse. +- **QTYPE** is the query type to match for the requests to be allowed or blocked. Common resource record types are supported. `*` stands for all record types. The default behavior for an omitted `type QTYPE...` is to match all kinds of DNS queries (same as `type *`). +- **SOURCE** is the source IP address to match for the requests to be allowed or blocked. Typical CIDR notation and single IP address are supported. `*` stands for all possible source IP addresses. + +## Examples + +To demonstrate the usage of plugin acl, here we provide some typical examples. + +Block all DNS queries with record type A from 192.168.0.0/16: + +~~~ corefile +. { + acl { + block type A net 192.168.0.0/16 + } +} +~~~ + +Block all DNS queries from 192.168.0.0/16 except for 192.168.1.0/24: + +~~~ corefile +. { + acl { + allow net 192.168.1.0/24 + block net 192.168.0.0/16 + } +} +~~~ + +Allow only DNS queries from 192.168.0.0/24 and 192.168.1.0/24: + +~~~ corefile +. { + acl { + allow net 192.168.0.0/16 192.168.1.0/24 + block + } +} +~~~ + +Block all DNS queries from 192.168.1.0/24 towards a.example.org: + +~~~ corefile +example.org { + acl a.example.org { + block net 192.168.1.0/24 + } +} +~~~ diff --git a/plugin/acl/acl.go b/plugin/acl/acl.go new file mode 100644 index 00000000000..ce7b041cbb1 --- /dev/null +++ b/plugin/acl/acl.go @@ -0,0 +1,112 @@ +package acl + +import ( + "context" + "net" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/metrics" + "github.com/coredns/coredns/request" + + "github.com/infobloxopen/go-trees/iptree" + "github.com/miekg/dns" +) + +// ACL enforces access control policies on DNS queries. 
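+// A query is evaluated against each rule whose zones match its name; within a
+// rule, the first policy that matches both the query type and the source IP
+// decides the action. A blocked query is answered with REFUSED.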
+type ACL struct { + Next plugin.Handler + + Rules []rule +} + +// rule defines a list of Zones and some ACL policies which will be +// enforced on them. +type rule struct { + zones []string + policies []policy +} + +// action defines the action against queries. +type action int + +// policy defines the ACL policy for DNS queries. +// A policy performs the specified action (block/allow) on all DNS queries +// matched by source IP or QTYPE. +type policy struct { + action action + qtypes map[uint16]struct{} + filter *iptree.Tree +} + +const ( + // actionNone does nothing on the queries. + actionNone = iota + // actionAllow allows authorized queries to recurse. + actionAllow + // actionBlock blocks unauthorized queries towards protected DNS zones. + actionBlock +) + +// ServeDNS implements the plugin.Handler interface. +func (a ACL) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + +RulesCheckLoop: + for _, rule := range a.Rules { + // check zone. + zone := plugin.Zones(rule.zones).Matches(state.Name()) + if zone == "" { + continue + } + + action := matchWithPolicies(rule.policies, w, r) + switch action { + case actionBlock: + { + m := new(dns.Msg) + m.SetRcode(r, dns.RcodeRefused) + w.WriteMsg(m) + RequestBlockCount.WithLabelValues(metrics.WithServer(ctx), zone).Inc() + return dns.RcodeSuccess, nil + } + case actionAllow: + { + break RulesCheckLoop + } + } + } + + RequestAllowCount.WithLabelValues(metrics.WithServer(ctx)).Inc() + return plugin.NextOrFailure(state.Name(), a.Next, ctx, w, r) +} + +// matchWithPolicies matches the DNS query with a list of ACL polices and returns suitable +// action against the query. +func matchWithPolicies(policies []policy, w dns.ResponseWriter, r *dns.Msg) action { + state := request.Request{W: w, Req: r} + + ip := net.ParseIP(state.IP()) + qtype := state.QType() + for _, policy := range policies { + // dns.TypeNone matches all query types. + _, matchAll := policy.qtypes[dns.TypeNone] + _, match := policy.qtypes[qtype] + if !matchAll && !match { + continue + } + + _, contained := policy.filter.GetByIP(ip) + if !contained { + continue + } + + // matched. + return policy.action + } + return actionNone +} + +// Name implements the plugin.Handler interface. +func (a ACL) Name() string { + return "acl" +} diff --git a/plugin/acl/acl_test.go b/plugin/acl/acl_test.go new file mode 100644 index 00000000000..ff3d86e1a0b --- /dev/null +++ b/plugin/acl/acl_test.go @@ -0,0 +1,394 @@ +package acl + +import ( + "context" + "testing" + + "github.com/coredns/coredns/plugin/test" + + "github.com/caddyserver/caddy" + "github.com/miekg/dns" +) + +type testResponseWriter struct { + test.ResponseWriter + Rcode int +} + +func (t *testResponseWriter) setRemoteIP(ip string) { + t.RemoteIP = ip +} + +// WriteMsg implement dns.ResponseWriter interface. +func (t *testResponseWriter) WriteMsg(m *dns.Msg) error { + t.Rcode = m.Rcode + return nil +} + +func NewTestControllerWithZones(input string, zones []string) *caddy.Controller { + ctr := caddy.NewTestController("dns", input) + ctr.ServerBlockKeys = append(ctr.ServerBlockKeys, zones...) + return ctr +} + +func TestACLServeDNS(t *testing.T) { + type args struct { + domain string + sourceIP string + qtype uint16 + } + tests := []struct { + name string + config string + zones []string + args args + wantRcode int + wantErr bool + }{ + // IPv4 tests. 
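+		// Each case parses config as a Corefile fragment, sends a query for
+		// args.domain from args.sourceIP, and compares the Rcode written
+		// back against wantRcode.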
+ { + "Blacklist 1 BLOCKED", + `acl example.org { + block type A net 192.168.0.0/16 + }`, + []string{}, + args{ + "www.example.org.", + "192.168.0.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 1 ALLOWED", + `acl example.org { + block type A net 192.168.0.0/16 + }`, + []string{}, + args{ + "www.example.org.", + "192.167.0.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Blacklist 2 BLOCKED", + ` + acl example.org { + block type * net 192.168.0.0/16 + }`, + []string{}, + args{ + "www.example.org.", + "192.168.0.2", + dns.TypeAAAA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 3 BLOCKED", + `acl example.org { + block type A + }`, + []string{}, + args{ + "www.example.org.", + "10.1.0.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 3 ALLOWED", + `acl example.org { + block type A + }`, + []string{}, + args{ + "www.example.org.", + "10.1.0.2", + dns.TypeAAAA, + }, + dns.RcodeSuccess, + false, + }, + { + "Blacklist 4 Single IP BLOCKED", + `acl example.org { + block type A net 192.168.1.2 + }`, + []string{}, + args{ + "www.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 4 Single IP ALLOWED", + `acl example.org { + block type A net 192.168.1.2 + }`, + []string{}, + args{ + "www.example.org.", + "192.168.1.3", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Whitelist 1 ALLOWED", + `acl example.org { + allow net 192.168.0.0/16 + block + }`, + []string{}, + args{ + "www.example.org.", + "192.168.0.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Whitelist 1 REFUSED", + `acl example.org { + allow type * net 192.168.0.0/16 + block + }`, + []string{}, + args{ + "www.example.org.", + "10.1.0.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 1 REFUSED", + `acl a.example.org { + block type * net 192.168.1.0/24 + }`, + []string{"example.org"}, + args{ + "a.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 1 ALLOWED", + `acl a.example.org { + block net 192.168.1.0/24 + }`, + []string{"example.org"}, + args{ + "www.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Fine-Grained 2 REFUSED", + `acl { + block net 192.168.1.0/24 + }`, + []string{"example.org"}, + args{ + "a.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 2 ALLOWED", + `acl { + block net 192.168.1.0/24 + }`, + []string{"example.org"}, + args{ + "a.example.com.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Fine-Grained 3 REFUSED", + `acl a.example.org { + block net 192.168.1.0/24 + } + acl b.example.org { + block type * net 192.168.2.0/24 + }`, + []string{"example.org"}, + args{ + "b.example.org.", + "192.168.2.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 3 ALLOWED", + `acl a.example.org { + block net 192.168.1.0/24 + } + acl b.example.org { + block net 192.168.2.0/24 + }`, + []string{"example.org"}, + args{ + "b.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + // IPv6 tests. 
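+		// The cases below mirror the IPv4 ones, exercising both CIDR and
+		// single-address matching for IPv6 sources.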
+ { + "Blacklist 1 BLOCKED IPv6", + `acl example.org { + block type A net 2001:db8:abcd:0012::0/64 + }`, + []string{}, + args{ + "www.example.org.", + "2001:db8:abcd:0012::1230", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 1 ALLOWED IPv6", + `acl example.org { + block type A net 2001:db8:abcd:0012::0/64 + }`, + []string{}, + args{ + "www.example.org.", + "2001:db8:abcd:0013::0", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Blacklist 2 BLOCKED IPv6", + `acl example.org { + block type A + }`, + []string{}, + args{ + "www.example.org.", + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 3 Single IP BLOCKED IPv6", + `acl example.org { + block type A net 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + }`, + []string{}, + args{ + "www.example.org.", + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 3 Single IP ALLOWED IPv6", + `acl example.org { + block type A net 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + }`, + []string{}, + args{ + "www.example.org.", + "2001:0db8:85a3:0000:0000:8a2e:0370:7335", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Fine-Grained 1 REFUSED IPv6", + `acl a.example.org { + block type * net 2001:db8:abcd:0012::0/64 + }`, + []string{"example.org"}, + args{ + "a.example.org.", + "2001:db8:abcd:0012:2019::0", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 1 ALLOWED IPv6", + `acl a.example.org { + block net 2001:db8:abcd:0012::0/64 + }`, + []string{"example.org"}, + args{ + "www.example.org.", + "2001:db8:abcd:0012:2019::0", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + } + + ctx := context.Background() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctr := NewTestControllerWithZones(tt.config, tt.zones) + a, err := parse(ctr) + a.Next = test.NextHandler(dns.RcodeSuccess, nil) + if err != nil { + t.Errorf("Error: Cannot parse acl from config: %v", err) + return + } + + w := &testResponseWriter{} + m := new(dns.Msg) + w.setRemoteIP(tt.args.sourceIP) + m.SetQuestion(tt.args.domain, tt.args.qtype) + _, err = a.ServeDNS(ctx, w, m) + if (err != nil) != tt.wantErr { + t.Errorf("Error: acl.ServeDNS() error = %v, wantErr %v", err, tt.wantErr) + return + } + if w.Rcode != tt.wantRcode { + t.Errorf("Error: acl.ServeDNS() Rcode = %v, want %v", w.Rcode, tt.wantRcode) + } + }) + } +} diff --git a/plugin/acl/metrics.go b/plugin/acl/metrics.go new file mode 100644 index 00000000000..442ea23744e --- /dev/null +++ b/plugin/acl/metrics.go @@ -0,0 +1,24 @@ +package acl + +import ( + "github.com/coredns/coredns/plugin" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // RequestBlockCount is the number of DNS requests being blocked. + RequestBlockCount = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: plugin.Namespace, + Subsystem: "dns", + Name: "request_block_count_total", + Help: "Counter of DNS requests being blocked.", + }, []string{"server", "zone"}) + // RequestAllowCount is the number of DNS requests being Allowed. 
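+	// It carries only the server label: a request is counted as allowed once
+	// no rule blocks it, regardless of the zone it matched.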
+ RequestAllowCount = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: plugin.Namespace, + Subsystem: "dns", + Name: "request_allow_count_total", + Help: "Counter of DNS requests being allowed.", + }, []string{"server"}) +) diff --git a/plugin/acl/setup.go b/plugin/acl/setup.go new file mode 100644 index 00000000000..74973771408 --- /dev/null +++ b/plugin/acl/setup.go @@ -0,0 +1,161 @@ +package acl + +import ( + "net" + "strings" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/metrics" + + "github.com/caddyserver/caddy" + "github.com/infobloxopen/go-trees/iptree" + "github.com/miekg/dns" +) + +func init() { plugin.Register("acl", setup) } + +func newDefaultFilter() *iptree.Tree { + defaultFilter := iptree.NewTree() + _, IPv4All, _ := net.ParseCIDR("0.0.0.0/0") + _, IPv6All, _ := net.ParseCIDR("::/0") + defaultFilter.InplaceInsertNet(IPv4All, struct{}{}) + defaultFilter.InplaceInsertNet(IPv6All, struct{}{}) + return defaultFilter +} + +func setup(c *caddy.Controller) error { + a, err := parse(c) + if err != nil { + return plugin.Error("acl", err) + } + + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + a.Next = next + return a + }) + + // Register all metrics. + c.OnStartup(func() error { + metrics.MustRegister(c, RequestBlockCount, RequestAllowCount) + return nil + }) + return nil +} + +func parse(c *caddy.Controller) (ACL, error) { + a := ACL{} + for c.Next() { + r := rule{} + r.zones = c.RemainingArgs() + if len(r.zones) == 0 { + // if empty, the zones from the configuration block are used. + r.zones = make([]string, len(c.ServerBlockKeys)) + copy(r.zones, c.ServerBlockKeys) + } + for i := range r.zones { + r.zones[i] = plugin.Host(r.zones[i]).Normalize() + } + + for c.NextBlock() { + p := policy{} + + action := strings.ToLower(c.Val()) + if action == "allow" { + p.action = actionAllow + } else if action == "block" { + p.action = actionBlock + } else { + return a, c.Errf("unexpected token %q; expect 'allow' or 'block'", c.Val()) + } + + p.qtypes = make(map[uint16]struct{}) + p.filter = iptree.NewTree() + + hasTypeSection := false + hasNetSection := false + + remainingTokens := c.RemainingArgs() + for len(remainingTokens) > 0 { + if !isPreservedIdentifier(remainingTokens[0]) { + return a, c.Errf("unexpected token %q; expect 'type | net'", remainingTokens[0]) + } + section := strings.ToLower(remainingTokens[0]) + + i := 1 + var tokens []string + for ; i < len(remainingTokens) && !isPreservedIdentifier(remainingTokens[i]); i++ { + tokens = append(tokens, remainingTokens[i]) + } + remainingTokens = remainingTokens[i:] + + if len(tokens) == 0 { + return a, c.Errf("no token specified in %q section", section) + } + + switch section { + case "type": + hasTypeSection = true + for _, token := range tokens { + if token == "*" { + p.qtypes[dns.TypeNone] = struct{}{} + break + } + qtype, ok := dns.StringToType[token] + if !ok { + return a, c.Errf("unexpected token %q; expect legal QTYPE", token) + } + p.qtypes[qtype] = struct{}{} + } + case "net": + hasNetSection = true + for _, token := range tokens { + if token == "*" { + p.filter = newDefaultFilter() + break + } + token = normalize(token) + _, source, err := net.ParseCIDR(token) + if err != nil { + return a, c.Errf("illegal CIDR notation %q", token) + } + p.filter.InplaceInsertNet(source, struct{}{}) + } + default: + return a, c.Errf("unexpected token %q; expect 'type | net'", section) 
+ } + } + + // optional `type` section means all record types. + if !hasTypeSection { + p.qtypes[dns.TypeNone] = struct{}{} + } + + // optional `net` means all ip addresses. + if !hasNetSection { + p.filter = newDefaultFilter() + } + + r.policies = append(r.policies, p) + } + a.Rules = append(a.Rules, r) + } + return a, nil +} + +func isPreservedIdentifier(token string) bool { + identifier := strings.ToLower(token) + return identifier == "type" || identifier == "net" +} + +// normalize appends '/32' for any single IPv4 address and '/128' for IPv6. +func normalize(rawNet string) string { + if idx := strings.IndexAny(rawNet, "/"); idx >= 0 { + return rawNet + } + + if idx := strings.IndexAny(rawNet, ":"); idx >= 0 { + return rawNet + "/128" + } + return rawNet + "/32" +} diff --git a/plugin/acl/setup_test.go b/plugin/acl/setup_test.go new file mode 100644 index 00000000000..f48da3f2466 --- /dev/null +++ b/plugin/acl/setup_test.go @@ -0,0 +1,245 @@ +package acl + +import ( + "testing" + + "github.com/caddyserver/caddy" +) + +func TestSetup(t *testing.T) { + tests := []struct { + name string + config string + wantErr bool + }{ + // IPv4 tests. + { + "Blacklist 1", + `acl { + block type A net 192.168.0.0/16 + }`, + false, + }, + { + "Blacklist 2", + `acl { + block type * net 192.168.0.0/16 + }`, + false, + }, + { + "Blacklist 3", + `acl { + block type A net * + }`, + false, + }, + { + "Blacklist 4", + `acl { + allow type * net 192.168.1.0/24 + block type * net 192.168.0.0/16 + }`, + false, + }, + { + "Whitelist 1", + `acl { + allow type * net 192.168.0.0/16 + block type * net * + }`, + false, + }, + { + "fine-grained 1", + `acl a.example.org { + block type * net 192.168.1.0/24 + }`, + false, + }, + { + "fine-grained 2", + `acl a.example.org { + block type * net 192.168.1.0/24 + } + acl b.example.org { + block type * net 192.168.2.0/24 + }`, + false, + }, + { + "Multiple Networks 1", + `acl example.org { + block type * net 192.168.1.0/24 192.168.3.0/24 + }`, + false, + }, + { + "Multiple Qtypes 1", + `acl example.org { + block type TXT ANY CNAME net 192.168.3.0/24 + }`, + false, + }, + { + "Missing argument 1", + `acl { + block A net 192.168.0.0/16 + }`, + true, + }, + { + "Missing argument 2", + `acl { + block type net 192.168.0.0/16 + }`, + true, + }, + { + "Illegal argument 1", + `acl { + block type ABC net 192.168.0.0/16 + }`, + true, + }, + { + "Illegal argument 2", + `acl { + blck type A net 192.168.0.0/16 + }`, + true, + }, + { + "Illegal argument 3", + `acl { + block type A net 192.168.0/16 + }`, + true, + }, + { + "Illegal argument 4", + `acl { + block type A net 192.168.0.0/33 + }`, + true, + }, + // IPv6 tests. 
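+		// As with the IPv4 cases above, these only exercise parsing: each
+		// Corefile snippet must parse cleanly or fail as expected; no query
+		// is sent.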
+ { + "Blacklist 1 IPv6", + `acl { + block type A net 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + }`, + false, + }, + { + "Blacklist 2 IPv6", + `acl { + block type * net 2001:db8:85a3::8a2e:370:7334 + }`, + false, + }, + { + "Blacklist 3 IPv6", + `acl { + block type A + }`, + false, + }, + { + "Blacklist 4 IPv6", + `acl { + allow net 2001:db8:abcd:0012::0/64 + block net 2001:db8:abcd:0012::0/48 + }`, + false, + }, + { + "Whitelist 1 IPv6", + `acl { + allow net 2001:db8:abcd:0012::0/64 + block + }`, + false, + }, + { + "fine-grained 1 IPv6", + `acl a.example.org { + block net 2001:db8:abcd:0012::0/64 + }`, + false, + }, + { + "fine-grained 2 IPv6", + `acl a.example.org { + block net 2001:db8:abcd:0012::0/64 + } + acl b.example.org { + block net 2001:db8:abcd:0013::0/64 + }`, + false, + }, + { + "Multiple Networks 1 IPv6", + `acl example.org { + block net 2001:db8:abcd:0012::0/64 2001:db8:85a3::8a2e:370:7334/64 + }`, + false, + }, + { + "Illegal argument 1 IPv6", + `acl { + block type A net 2001::85a3::8a2e:370:7334 + }`, + true, + }, + { + "Illegal argument 2 IPv6", + `acl { + block type A net 2001:db8:85a3:::8a2e:370:7334 + }`, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctr := caddy.NewTestController("dns", tt.config) + if err := setup(ctr); (err != nil) != tt.wantErr { + t.Errorf("Error: setup() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestNormalize(t *testing.T) { + type args struct { + rawNet string + } + tests := []struct { + name string + args args + want string + }{ + { + "Network range 1", + args{"10.218.10.8/24"}, + "10.218.10.8/24", + }, + { + "IP address 1", + args{"10.218.10.8"}, + "10.218.10.8/32", + }, + { + "IPv6 address 1", + args{"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, + "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := normalize(tt.args.rawNet); got != tt.want { + t.Errorf("Error: normalize() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/plugin/any/OWNERS b/plugin/any/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/any/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/any/README.md b/plugin/any/README.md index 1abc978030e..5b88467459d 100644 --- a/plugin/any/README.md +++ b/plugin/any/README.md @@ -3,7 +3,7 @@ ## Name -*any* - give a minimal response to ANY queries. +*any* - gives a minimal response to ANY queries. ## Description diff --git a/plugin/any/setup.go b/plugin/any/setup.go index f2d709e268b..703b8e5d2c5 100644 --- a/plugin/any/setup.go +++ b/plugin/any/setup.go @@ -7,12 +7,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("any", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("any", setup) } func setup(c *caddy.Controller) error { a := Any{} diff --git a/plugin/auto/OWNERS b/plugin/auto/OWNERS deleted file mode 100644 index 3fc6bad85ed..00000000000 --- a/plugin/auto/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -reviewers: - - miekg - - stp-ip -approvers: - - miekg diff --git a/plugin/auto/README.md b/plugin/auto/README.md index e313c966dc9..26c232c4d17 100644 --- a/plugin/auto/README.md +++ b/plugin/auto/README.md @@ -56,8 +56,8 @@ Load `org` domains from `/etc/coredns/zones/org` and allow transfers to the inte notifies to 10.240.1.1 ~~~ corefile -. 
{ - auto org { +org { + auto { directory /etc/coredns/zones/org transfer to * transfer to 10.240.1.1 diff --git a/plugin/auto/setup.go b/plugin/auto/setup.go index f62aa71683a..e6495b192bd 100644 --- a/plugin/auto/setup.go +++ b/plugin/auto/setup.go @@ -18,12 +18,7 @@ import ( var log = clog.NewWithPlugin("auto") -func init() { - caddy.RegisterPlugin("auto", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("auto", setup) } func setup(c *caddy.Controller) error { a, err := autoParse(c) diff --git a/plugin/auto/walk.go b/plugin/auto/walk.go index d4002a46d1c..40c62e51445 100644 --- a/plugin/auto/walk.go +++ b/plugin/auto/walk.go @@ -20,7 +20,7 @@ func (a Auto) Walk() error { toDelete[n] = true } - filepath.Walk(a.loader.directory, func(path string, info os.FileInfo, err error) error { + filepath.Walk(a.loader.directory, func(path string, info os.FileInfo, _ error) error { if info == nil || info.IsDir() { return nil } diff --git a/plugin/auto/watcher_test.go b/plugin/auto/watcher_test.go index a7013448b63..be2a0d48b17 100644 --- a/plugin/auto/watcher_test.go +++ b/plugin/auto/watcher_test.go @@ -30,12 +30,12 @@ func TestWatcher(t *testing.T) { a.Walk() - // example.org and example.com should exist - if x := len(a.Zones.Z["example.org."].All()); x != 4 { - t.Fatalf("Expected 4 RRs, got %d", x) + // example.org and example.com should exist, we have 3 apex rrs and 1 "real" record. All() returns the non-apex ones. + if x := len(a.Zones.Z["example.org."].All()); x != 1 { + t.Fatalf("Expected 1 RRs, got %d", x) } - if x := len(a.Zones.Z["example.com."].All()); x != 4 { - t.Fatalf("Expected 4 RRs, got %d", x) + if x := len(a.Zones.Z["example.com."].All()); x != 1 { + t.Fatalf("Expected 1 RRs, got %d", x) } // Now remove one file, rescan and see if it's gone. diff --git a/plugin/auto/zone.go b/plugin/auto/zone.go index c139e724198..0a12ca39f98 100644 --- a/plugin/auto/zone.go +++ b/plugin/auto/zone.go @@ -7,7 +7,7 @@ import ( "github.com/coredns/coredns/plugin/file" ) -// Zones maps zone names to a *Zone. This keep track of what we zones we have loaded at +// Zones maps zone names to a *Zone. This keeps track of what zones we have loaded at // any one time. type Zones struct { Z map[string]*file.Zone // A map mapping zone (origin) to the Zone's data. @@ -56,7 +56,7 @@ func (z *Zones) Add(zo *file.Zone, name string) { z.Unlock() } -// Remove removes the zone named name from z. It also stop the zone's reload goroutine. +// Remove removes the zone named name from z. It also stops the zone's reload goroutine. func (z *Zones) Remove(name string) { z.Lock() diff --git a/plugin/autopath/README.md b/plugin/autopath/README.md index 96b0e5beb90..86266c5fa44 100644 --- a/plugin/autopath/README.md +++ b/plugin/autopath/README.md @@ -27,7 +27,7 @@ If a plugin implements the `AutoPather` interface then it can be used. ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metric is exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metric is exported: * `coredns_autopath_success_count_total{server}` - counter of successfully autopath-ed queries. @@ -39,7 +39,7 @@ The `server` label is explained in the *metrics* plugin documentation. autopath my-resolv.conf ~~~ -Use `my-resolv.conf` as the file to get the search path from. This file only needs so have one line: +Use `my-resolv.conf` as the file to get the search path from. 
This file only needs to have one line: `search domain1 domain2 ...` ~~~ diff --git a/plugin/autopath/autopath.go b/plugin/autopath/autopath.go index 73882cdbd01..4db9366d2f9 100644 --- a/plugin/autopath/autopath.go +++ b/plugin/autopath/autopath.go @@ -3,9 +3,9 @@ Package autopath implements autopathing. This is a hack; it shortcuts the client's search path resolution by performing these lookups on the server... The server has a copy (via AutoPathFunc) of the client's search path and on -receiving a query it first establish if the suffix matches the FIRST configured +receiving a query it first establishes if the suffix matches the FIRST configured element. If no match can be found the query will be forwarded up the plugin -chain without interference (iff 'fallthrough' has been set). +chain without interference (if, and only if, 'fallthrough' has been set). If the query is deemed to fall in the search path the server will perform the queries with each element of the search path appended in sequence until a diff --git a/plugin/autopath/setup.go b/plugin/autopath/setup.go index 2f69a3eb2c7..94dde59533a 100644 --- a/plugin/autopath/setup.go +++ b/plugin/autopath/setup.go @@ -11,13 +11,7 @@ import ( "github.com/miekg/dns" ) -func init() { - caddy.RegisterPlugin("autopath", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) - -} +func init() { plugin.Register("autopath", setup) } func setup(c *caddy.Controller) error { ap, mw, err := autoPathParse(c) diff --git a/plugin/azure/README.md b/plugin/azure/README.md new file mode 100644 index 00000000000..70cf9a15d73 --- /dev/null +++ b/plugin/azure/README.md @@ -0,0 +1,56 @@ +# azure + +## Name + +*azure* - enables serving zone data from Microsoft Azure DNS service. + +## Description + +The azure plugin is useful for serving zones from Microsoft Azure DNS. The *azure* plugin supports +all the DNS records supported by Azure, viz. A, AAAA, CNAME, MX, NS, PTR, SOA, SRV, and TXT +record types. + +## Syntax + +~~~ txt +azure RESOURCE_GROUP:ZONE... { + tenant TENANT_ID + client CLIENT_ID + secret CLIENT_SECRET + subscription SUBSCRIPTION_ID + environment ENVIRONMENT + fallthrough [ZONES...] +} +~~~ + +* **RESOURCE_GROUP:ZONE** is the resource group to which the hosted zones belongs on Azure, + and **ZONE** the zone that contains data. + +* **CLIENT_ID** and **CLIENT_SECRET** are the credentials for Azure, and `tenant` specifies the + **TENANT_ID** to be used. **SUBSCRIPTION_ID** is the subscription ID. All of these are needed + to access the data in Azure. + +* `environment` specifies the Azure **ENVIRONMENT**. + +* `fallthrough` If zone matches and no record can be generated, pass request to the next plugin. + If **ZONES** is omitted, then fallthrough happens for all zones for which the plugin is + authoritative. + +## Examples + +Enable the *azure* plugin with Azure credentials for the zone `example.org`: + +~~~ txt +example.org { + azure resource_group_foo:example.org { + tenant 123abc-123abc-123abc-123abc + client 123abc-123abc-123abc-234xyz + subscription 123abc-123abc-123abc-563abc + secret mysecret + } +} +~~~ + +## Also See + +The [Azure DNS Overview](https://docs.microsoft.com/en-us/azure/dns/dns-overview). 
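+
+As a variation on the example above, here is a sketch with `fallthrough` enabled (all IDs are
+placeholder values); queries for names in `example.org` that yield no Azure record are handed to
+the next plugin in the chain:
+
+~~~ txt
+example.org {
+    azure resource_group_foo:example.org {
+        # all IDs below are placeholders
+        tenant 123abc-123abc-123abc-123abc
+        client 123abc-123abc-123abc-234xyz
+        subscription 123abc-123abc-123abc-563abc
+        secret mysecret
+        fallthrough
+    }
+    whoami
+}
+~~~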
diff --git a/plugin/azure/azure.go b/plugin/azure/azure.go new file mode 100644 index 00000000000..8432af31699 --- /dev/null +++ b/plugin/azure/azure.go @@ -0,0 +1,249 @@ +package azure + +import ( + "context" + "fmt" + "net" + "sync" + "time" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/pkg/fall" + "github.com/coredns/coredns/plugin/pkg/upstream" + "github.com/coredns/coredns/request" + + azuredns "github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns" + "github.com/miekg/dns" +) + +type zone struct { + id string + z *file.Zone + zone string +} + +type zones map[string][]*zone + +// Azure is the core struct of the azure plugin. +type Azure struct { + zoneNames []string + client azuredns.RecordSetsClient + upstream *upstream.Upstream + zMu sync.RWMutex + zones zones + + Next plugin.Handler + Fall fall.F +} + +// New validates the input DNS zones and initializes the Azure struct. +func New(ctx context.Context, dnsClient azuredns.RecordSetsClient, keys map[string][]string) (*Azure, error) { + zones := make(map[string][]*zone, len(keys)) + names := make([]string, len(keys)) + + for resourceGroup, znames := range keys { + for _, name := range znames { + if _, err := dnsClient.ListAllByDNSZone(context.Background(), resourceGroup, name, nil, ""); err != nil { + return nil, err + } + + fqdn := dns.Fqdn(name) + if _, ok := zones[fqdn]; !ok { + names = append(names, fqdn) + } + zones[fqdn] = append(zones[fqdn], &zone{id: resourceGroup, zone: fqdn, z: file.NewZone(fqdn, "")}) + } + } + return &Azure{ + client: dnsClient, + zones: zones, + zoneNames: names, + upstream: upstream.New(), + }, nil +} + +// Run updates the zone from azure. 
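+// It performs one synchronous update so the zones are populated before the
+// server starts serving, then refreshes them every minute until ctx is
+// cancelled.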
+func (h *Azure) Run(ctx context.Context) error { + if err := h.updateZones(ctx); err != nil { + return err + } + go func() { + for { + select { + case <-ctx.Done(): + log.Infof("Breaking out of Azure update loop: %v", ctx.Err()) + return + case <-time.After(1 * time.Minute): + if err := h.updateZones(ctx); err != nil && ctx.Err() == nil { + log.Errorf("Failed to update zones: %v", err) + } + } + } + }() + return nil +} + +func (h *Azure) updateZones(ctx context.Context) error { + errs := make([]string, 0) + for zName, z := range h.zones { + for i, hostedZone := range z { + recordSet, err := h.client.ListByDNSZone(ctx, hostedZone.id, hostedZone.zone, nil, "") + if err != nil { + errs = append(errs, fmt.Sprintf("failed to list resource records for %v from azure: %v", hostedZone.zone, err)) + } + newZ := updateZoneFromResourceSet(recordSet, zName) + newZ.Upstream = h.upstream + h.zMu.Lock() + (*z[i]).z = newZ + h.zMu.Unlock() + } + } + + if len(errs) != 0 { + return fmt.Errorf("errors updating zones: %v", errs) + } + return nil + +} + +func updateZoneFromResourceSet(recordSet azuredns.RecordSetListResultPage, zName string) *file.Zone { + newZ := file.NewZone(zName, "") + + for _, result := range *(recordSet.Response().Value) { + resultFqdn := *(result.RecordSetProperties.Fqdn) + resultTTL := uint32(*(result.RecordSetProperties.TTL)) + if result.RecordSetProperties.ARecords != nil { + for _, A := range *(result.RecordSetProperties.ARecords) { + a := &dns.A{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: resultTTL}, + A: net.ParseIP(*(A.Ipv4Address))} + newZ.Insert(a) + } + } + + if result.RecordSetProperties.AaaaRecords != nil { + for _, AAAA := range *(result.RecordSetProperties.AaaaRecords) { + aaaa := &dns.AAAA{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: resultTTL}, + AAAA: net.ParseIP(*(AAAA.Ipv6Address))} + newZ.Insert(aaaa) + } + } + + if result.RecordSetProperties.MxRecords != nil { + for _, MX := range *(result.RecordSetProperties.MxRecords) { + mx := &dns.MX{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: resultTTL}, + Preference: uint16(*(MX.Preference)), + Mx: dns.Fqdn(*(MX.Exchange))} + newZ.Insert(mx) + } + } + + if result.RecordSetProperties.PtrRecords != nil { + for _, PTR := range *(result.RecordSetProperties.PtrRecords) { + ptr := &dns.PTR{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: resultTTL}, + Ptr: dns.Fqdn(*(PTR.Ptrdname))} + newZ.Insert(ptr) + } + } + + if result.RecordSetProperties.SrvRecords != nil { + for _, SRV := range *(result.RecordSetProperties.SrvRecords) { + srv := &dns.SRV{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: resultTTL}, + Priority: uint16(*(SRV.Priority)), + Weight: uint16(*(SRV.Weight)), + Port: uint16(*(SRV.Port)), + Target: dns.Fqdn(*(SRV.Target))} + newZ.Insert(srv) + } + } + + if result.RecordSetProperties.TxtRecords != nil { + for _, TXT := range *(result.RecordSetProperties.TxtRecords) { + txt := &dns.TXT{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: resultTTL}, + Txt: *(TXT.Value)} + newZ.Insert(txt) + } + } + + if result.RecordSetProperties.NsRecords != nil { + for _, NS := range *(result.RecordSetProperties.NsRecords) { + ns := &dns.NS{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: resultTTL}, + Ns: *(NS.Nsdname)} + newZ.Insert(ns) + } + } + + if 
result.RecordSetProperties.SoaRecord != nil { + SOA := result.RecordSetProperties.SoaRecord + soa := &dns.SOA{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeSOA, Class: dns.ClassINET, Ttl: resultTTL}, + Minttl: uint32(*(SOA.MinimumTTL)), + Expire: uint32(*(SOA.ExpireTime)), + Retry: uint32(*(SOA.RetryTime)), + Refresh: uint32(*(SOA.RefreshTime)), + Serial: uint32(*(SOA.SerialNumber)), + Mbox: dns.Fqdn(*(SOA.Email)), + Ns: *(SOA.Host)} + newZ.Insert(soa) + } + + if result.RecordSetProperties.CnameRecord != nil { + CNAME := result.RecordSetProperties.CnameRecord.Cname + cname := &dns.CNAME{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: resultTTL}, + Target: dns.Fqdn(*CNAME)} + newZ.Insert(cname) + } + } + return newZ +} + +// ServeDNS implements the plugin.Handler interface. +func (h *Azure) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + qname := state.Name() + + zone := plugin.Zones(h.zoneNames).Matches(qname) + if zone == "" { + return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) + } + + zones, ok := h.zones[zone] // ok true if we are authoritative for the zone. + if !ok || zones == nil { + return dns.RcodeServerFailure, nil + } + + m := new(dns.Msg) + m.SetReply(r) + m.Authoritative = true + var result file.Result + for _, z := range zones { + h.zMu.RLock() + m.Answer, m.Ns, m.Extra, result = z.z.Lookup(ctx, state, qname) + h.zMu.RUnlock() + + // record type exists for this name (NODATA). + if len(m.Answer) != 0 || result == file.NoData { + break + } + } + + if len(m.Answer) == 0 && result != file.NoData && h.Fall.Through(qname) { + return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) + } + + switch result { + case file.Success: + case file.NoData: + case file.NameError: + m.Rcode = dns.RcodeNameError + case file.Delegation: + m.Authoritative = false + case file.ServerFailure: + return dns.RcodeServerFailure, nil + } + + w.WriteMsg(m) + return dns.RcodeSuccess, nil +} + +// Name implements plugin.Handler.Name. +func (h *Azure) Name() string { return "azure" } diff --git a/plugin/azure/azure_test.go b/plugin/azure/azure_test.go new file mode 100644 index 00000000000..d006f196bce --- /dev/null +++ b/plugin/azure/azure_test.go @@ -0,0 +1,180 @@ +package azure + +import ( + "context" + "reflect" + "testing" + + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/pkg/dnstest" + "github.com/coredns/coredns/plugin/pkg/fall" + "github.com/coredns/coredns/plugin/test" + "github.com/coredns/coredns/request" + + "github.com/miekg/dns" +) + +var demoAzure = Azure{ + Next: testHandler(), + Fall: fall.Zero, + zoneNames: []string{"example.org.", "www.example.org.", "example.org.", "sample.example.org."}, + zones: testZones(), +} + +func testZones() zones { + zones := make(map[string][]*zone) + zones["example.org."] = append(zones["example.org."], &zone{zone: "example.org."}) + newZ := file.NewZone("example.org.", "") + + for _, rr := range []string{ + "example.org. 300 IN A 1.2.3.4", + "example.org. 300 IN AAAA 2001:db8:85a3::8a2e:370:7334", + "www.example.org. 300 IN A 1.2.3.4", + "www.example.org. 300 IN A 1.2.3.4", + "org. 172800 IN NS ns3-06.azure-dns.org.", + "org. 300 IN SOA ns1-06.azure-dns.com. azuredns-hostmaster.microsoft.com. 1 3600 300 2419200 300", + "cname.example.org. 300 IN CNAME example.org", + "mail.example.org. 300 IN MX 10 mailserver.example.com", + "ptr.example.org. 
300 IN PTR www.ptr-example.com", + "example.org. 300 IN SRV 1 10 5269 srv-1.example.com.", + "example.org. 300 IN SRV 1 10 5269 srv-2.example.com.", + "txt.example.org. 300 IN TXT \"TXT for example.org\"", + } { + r, _ := dns.NewRR(rr) + newZ.Insert(r) + } + zones["example.org."][0].z = newZ + return zones +} + +func testHandler() test.HandlerFunc { + return func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + qname := state.Name() + m := new(dns.Msg) + rcode := dns.RcodeServerFailure + if qname == "example.gov." { // No records match, test fallthrough. + m.SetReply(r) + rr := test.A("example.gov. 300 IN A 2.4.6.8") + m.Answer = []dns.RR{rr} + m.Authoritative = true + rcode = dns.RcodeSuccess + } + m.SetRcode(r, rcode) + w.WriteMsg(m) + return rcode, nil + } +} + +func TestAzure(t *testing.T) { + tests := []struct { + qname string + qtype uint16 + wantRetCode int + wantAnswer []string + wantMsgRCode int + wantNS []string + expectedErr error + }{ + { + qname: "example.org.", + qtype: dns.TypeA, + wantAnswer: []string{"example.org. 300 IN A 1.2.3.4"}, + }, + { + qname: "example.org", + qtype: dns.TypeAAAA, + wantAnswer: []string{"example.org. 300 IN AAAA 2001:db8:85a3::8a2e:370:7334"}, + }, + { + qname: "example.org", + qtype: dns.TypeSOA, + wantAnswer: []string{"org. 300 IN SOA ns1-06.azure-dns.com. azuredns-hostmaster.microsoft.com. 1 3600 300 2419200 300"}, + }, + { + qname: "badexample.com", + qtype: dns.TypeA, + wantRetCode: dns.RcodeServerFailure, + wantMsgRCode: dns.RcodeServerFailure, + }, + { + qname: "example.gov", + qtype: dns.TypeA, + wantAnswer: []string{"example.gov. 300 IN A 2.4.6.8"}, + }, + { + qname: "example.org", + qtype: dns.TypeSRV, + wantAnswer: []string{"example.org. 300 IN SRV 1 10 5269 srv-1.example.com.", "example.org. 300 IN SRV 1 10 5269 srv-2.example.com."}, + }, + { + qname: "cname.example.org.", + qtype: dns.TypeCNAME, + wantAnswer: []string{"cname.example.org. 300 IN CNAME example.org."}, + }, + { + qname: "cname.example.org.", + qtype: dns.TypeA, + wantAnswer: []string{"cname.example.org. 300 IN CNAME example.org.", "example.org. 300 IN A 1.2.3.4"}, + }, + { + qname: "mail.example.org.", + qtype: dns.TypeMX, + wantAnswer: []string{"mail.example.org. 300 IN MX 10 mailserver.example.com."}, + }, + { + qname: "ptr.example.org.", + qtype: dns.TypePTR, + wantAnswer: []string{"ptr.example.org. 300 IN PTR www.ptr-example.com."}, + }, + { + qname: "txt.example.org.", + qtype: dns.TypeTXT, + wantAnswer: []string{"txt.example.org. 300 IN TXT \"TXT for example.org\""}, + }, + } + + for ti, tc := range tests { + req := new(dns.Msg) + req.SetQuestion(dns.Fqdn(tc.qname), tc.qtype) + + rec := dnstest.NewRecorder(&test.ResponseWriter{}) + code, err := demoAzure.ServeDNS(context.Background(), rec, req) + + if err != tc.expectedErr { + t.Fatalf("Test %d: Expected error %v, but got %v", ti, tc.expectedErr, err) + } + + if code != int(tc.wantRetCode) { + t.Fatalf("Test %d: Expected returned status code %s, but got %s", ti, dns.RcodeToString[tc.wantRetCode], dns.RcodeToString[code]) + } + + if tc.wantMsgRCode != rec.Msg.Rcode { + t.Errorf("Test %d: Unexpected msg status code. Want: %s, got: %s", ti, dns.RcodeToString[tc.wantMsgRCode], dns.RcodeToString[rec.Msg.Rcode]) + } + + if len(tc.wantAnswer) != len(rec.Msg.Answer) { + t.Errorf("Test %d: Unexpected number of Answers. 
Want: %d, got: %d", ti, len(tc.wantAnswer), len(rec.Msg.Answer)) + } else { + for i, gotAnswer := range rec.Msg.Answer { + if gotAnswer.String() != tc.wantAnswer[i] { + t.Errorf("Test %d: Unexpected answer.\nWant:\n\t%s\nGot:\n\t%s", ti, tc.wantAnswer[i], gotAnswer) + } + } + } + + if len(tc.wantNS) != len(rec.Msg.Ns) { + t.Errorf("Test %d: Unexpected NS number. Want: %d, got: %d", ti, len(tc.wantNS), len(rec.Msg.Ns)) + } else { + for i, ns := range rec.Msg.Ns { + got, ok := ns.(*dns.SOA) + if !ok { + t.Errorf("Test %d: Unexpected NS type. Want: SOA, got: %v", ti, reflect.TypeOf(got)) + } + if got.String() != tc.wantNS[i] { + t.Errorf("Test %d: Unexpected NS.\nWant: %v\nGot: %v", ti, tc.wantNS[i], got) + } + } + } + } +} diff --git a/plugin/azure/setup.go b/plugin/azure/setup.go new file mode 100644 index 00000000000..15ebb7d6fc3 --- /dev/null +++ b/plugin/azure/setup.go @@ -0,0 +1,120 @@ +package azure + +import ( + "context" + "strings" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/pkg/fall" + clog "github.com/coredns/coredns/plugin/pkg/log" + + azuredns "github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns" + azurerest "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/caddyserver/caddy" +) + +var log = clog.NewWithPlugin("azure") + +func init() { plugin.Register("azure", setup) } + +func setup(c *caddy.Controller) error { + env, keys, fall, err := parse(c) + if err != nil { + return plugin.Error("azure", err) + } + ctx := context.Background() + + dnsClient := azuredns.NewRecordSetsClient(env.Values[auth.SubscriptionID]) + if dnsClient.Authorizer, err = env.GetAuthorizer(); err != nil { + return plugin.Error("azure", err) + } + + h, err := New(ctx, dnsClient, keys) + if err != nil { + return plugin.Error("azure", err) + } + h.Fall = fall + if err := h.Run(ctx); err != nil { + return plugin.Error("azure", err) + } + + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + h.Next = next + return h + }) + return nil +} + +func parse(c *caddy.Controller) (auth.EnvironmentSettings, map[string][]string, fall.F, error) { + resourceGroupMapping := map[string][]string{} + resourceGroupSet := map[string]struct{}{} + azureEnv := azurerest.PublicCloud + env := auth.EnvironmentSettings{Values: map[string]string{}} + + var fall fall.F + + for c.Next() { + args := c.RemainingArgs() + + for i := 0; i < len(args); i++ { + parts := strings.SplitN(args[i], ":", 2) + if len(parts) != 2 { + return env, resourceGroupMapping, fall, c.Errf("invalid resource group/zone: %q", args[i]) + } + resourceGroup, zoneName := parts[0], parts[1] + if resourceGroup == "" || zoneName == "" { + return env, resourceGroupMapping, fall, c.Errf("invalid resource group/zone: %q", args[i]) + } + if _, ok := resourceGroupSet[args[i]]; ok { + return env, resourceGroupMapping, fall, c.Errf("conflicting zone: %q", args[i]) + } + + resourceGroupSet[args[i]] = struct{}{} + resourceGroupMapping[resourceGroup] = append(resourceGroupMapping[resourceGroup], zoneName) + } + for c.NextBlock() { + switch c.Val() { + case "subscription": + if !c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.SubscriptionID] = c.Val() + case "tenant": + if !c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.TenantID] = c.Val() + case "client": + if 
!c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.ClientID] = c.Val() + case "secret": + if !c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.ClientSecret] = c.Val() + case "environment": + if !c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.ClientSecret] = c.Val() + var err error + if azureEnv, err = azurerest.EnvironmentFromName(c.Val()); err != nil { + return env, resourceGroupMapping, fall, c.Errf("cannot set azure environment: %q", err.Error()) + } + case "fallthrough": + fall.SetZonesFromArgs(c.RemainingArgs()) + default: + return env, resourceGroupMapping, fall, c.Errf("unknown property: %q", c.Val()) + } + } + } + + env.Values[auth.Resource] = azureEnv.ResourceManagerEndpoint + env.Environment = azureEnv + + return env, resourceGroupMapping, fall, nil +} diff --git a/plugin/azure/setup_test.go b/plugin/azure/setup_test.go new file mode 100644 index 00000000000..c0b22d5814e --- /dev/null +++ b/plugin/azure/setup_test.go @@ -0,0 +1,72 @@ +package azure + +import ( + "testing" + + "github.com/caddyserver/caddy" +) + +func TestSetup(t *testing.T) { + tests := []struct { + body string + expectedError bool + }{ + {`azure`, false}, + {`azure :`, true}, + {`azure resource_set:zone`, false}, + {`azure resource_set:zone { + tenant +}`, true}, + {`azure resource_set:zone { + tenant +}`, true}, + {`azure resource_set:zone { + client +}`, true}, + {`azure resource_set:zone { + secret +}`, true}, + {`azure resource_set:zone { + subscription +}`, true}, + {`azure resource_set:zone { + upstream 10.0.0.1 +}`, true}, + + {`azure resource_set:zone { + upstream +}`, true}, + {`azure resource_set:zone { + foobar +}`, true}, + {`azure resource_set:zone { + tenant tenant_id + client client_id + secret client_secret + subscription subscription_id +}`, false}, + + {`azure resource_set:zone { + fallthrough +}`, false}, + {`azure resource_set:zone { + environment AZUREPUBLICCLOUD + }`, false}, + {`azure resource_set:zone resource_set:zone { + fallthrough + }`, true}, + {`azure resource_set:zone,zone2 { + fallthrough + }`, false}, + {`azure resource-set { + fallthrough + }`, true}, + } + + for i, test := range tests { + c := caddy.NewTestController("dns", test.body) + if _, _, _, err := parse(c); (err == nil) == test.expectedError { + t.Fatalf("Unexpected errors: %v in test: %d\n\t%s", err, i, test.body) + } + } +} diff --git a/plugin/backend_lookup.go b/plugin/backend_lookup.go index 096cf806be7..9e5c9eeecbf 100644 --- a/plugin/backend_lookup.go +++ b/plugin/backend_lookup.go @@ -372,6 +372,8 @@ func NS(ctx context.Context, b ServiceBackend, zone string, state request.Reques // ... 
and reset
 	state.Req.Question[0].Name = old

+	seen := map[string]bool{}
+
 	for _, serv := range services {
 		what, ip := serv.HostType()
 		switch what {
@@ -380,8 +382,13 @@ func NS(ctx context.Context, b ServiceBackend, zone string, state request.Reques
 		case dns.TypeA, dns.TypeAAAA:
 			serv.Host = msg.Domain(serv.Key)
-			records = append(records, serv.NewNS(state.QName()))
 			extra = append(extra, newAddress(serv, serv.Host, ip, what))
+			ns := serv.NewNS(state.QName())
+			if _, ok := seen[ns.Ns]; ok {
+				continue
+			}
+			seen[ns.Ns] = true
+			records = append(records, ns)
 		}
 	}
 	return records, extra, nil
diff --git a/plugin/bind/OWNERS b/plugin/bind/OWNERS
deleted file mode 100644
index eee46f68652..00000000000
--- a/plugin/bind/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-reviewers:
-  - miekg
-approvers:
-  - miekg
diff --git a/plugin/bind/README.md b/plugin/bind/README.md
index 8561d0b3c60..fec5511aba3 100644
--- a/plugin/bind/README.md
+++ b/plugin/bind/README.md
@@ -40,7 +40,7 @@ To allow processing DNS requests only local host on both IPv4 and IPv6 stacks, u
 }
 ~~~

-If the configuration comes up with several *bind* directives, all addresses are consolidated together:
+If the configuration contains several *bind* plugins, all addresses are consolidated together:
 The following sample is equivalent to the preceding:

 ~~~ corefile
diff --git a/plugin/bind/bind.go b/plugin/bind/bind.go
index 749561a3616..cfbd36597a0 100644
--- a/plugin/bind/bind.go
+++ b/plugin/bind/bind.go
@@ -1,11 +1,6 @@
 // Package bind allows binding to a specific interface instead of bind to all of them.
-import "github.com/caddyserver/caddy"
+import "github.com/coredns/coredns/plugin"

-func init() {
-	caddy.RegisterPlugin("bind", caddy.Plugin{
-		ServerType: "dns",
-		Action:     setup,
-	})
-}
+func init() { plugin.Register("bind", setup) }
diff --git a/plugin/bufsize/README.md b/plugin/bufsize/README.md
new file mode 100644
index 00000000000..65c73ab6e56
--- /dev/null
+++ b/plugin/bufsize/README.md
@@ -0,0 +1,39 @@
+# bufsize
+## Name
+*bufsize* - sets the EDNS0 buffer size to prevent IP fragmentation.
+
+## Description
+*bufsize* limits a requester's UDP payload size.
+It prevents IP fragmentation, which helps to mitigate DNS vulnerabilities.
+
+## Syntax
+```txt
+bufsize [SIZE]
+```
+
+**[SIZE]** is an integer value for setting the buffer size.
+The default value is 512, and the value must be within 512 - 4096.
+Only one argument is acceptable, and it covers both IPv4 and IPv6.
+
+## Examples
+Enable limiting the buffer size of the outgoing query to the resolver (172.31.0.10):
+```corefile
+. {
+    bufsize 512
+    forward . 172.31.0.10
+    log
+}
+```
+
+Enable limiting the buffer size as an authoritative nameserver:
+```corefile
+. {
+    bufsize 512
+    file db.example.org
+    log
+}
+```
+
+## Considerations
+- Setting bufsize to 1232 bytes may avoid fragmentation on the majority of networks in use today, but it depends on the MTU of the physical network links.
+- For now, if a client does not use EDNS, this plugin adds an OPT RR.
diff --git a/plugin/bufsize/bufsize.go b/plugin/bufsize/bufsize.go
new file mode 100644
index 00000000000..1522be8948b
--- /dev/null
+++ b/plugin/bufsize/bufsize.go
@@ -0,0 +1,31 @@
+// Package bufsize implements a plugin that modifies the EDNS0 buffer size.
+package bufsize
+
+import (
+	"context"
+
+	"github.com/coredns/coredns/plugin"
+
+	"github.com/miekg/dns"
+)
+
+// Bufsize implements the bufsize plugin.
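+// Size is advertised as the EDNS0 UDP payload size: ServeDNS overwrites the
+// size in a request's existing OPT RR and adds an OPT RR when none is present.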
+type Bufsize struct { + Next plugin.Handler + Size int +} + +// ServeDNS implements the plugin.Handler interface. +func (buf Bufsize) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + if option := r.IsEdns0(); option != nil { + option.SetUDPSize(uint16(buf.Size)) + } else { + // If a client does not use EDNS, add it + r.SetEdns0(uint16(buf.Size), false) + } + + return plugin.NextOrFailure(buf.Name(), buf.Next, ctx, w, r) +} + +// Name implements the Handler interface. +func (buf Bufsize) Name() string { return "bufsize" } diff --git a/plugin/bufsize/bufsize_test.go b/plugin/bufsize/bufsize_test.go new file mode 100644 index 00000000000..3d714d2f194 --- /dev/null +++ b/plugin/bufsize/bufsize_test.go @@ -0,0 +1,72 @@ +package bufsize + +import ( + "context" + "testing" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/test" + "github.com/coredns/coredns/plugin/whoami" + + "github.com/miekg/dns" +) + +func TestBufsize(t *testing.T) { + em := Bufsize{ + Size: 512, + } + + tests := []struct { + next plugin.Handler + qname string + inputBufsize uint16 + outgoingBufsize uint16 + expectedErr error + }{ + // This plugin is responsible for limiting outgoing query's bufize + { + next: whoami.Whoami{}, + qname: ".", + inputBufsize: 1200, + outgoingBufsize: 512, + expectedErr: nil, + }, + // If EDNS is not enabled, this plugin adds it + { + next: whoami.Whoami{}, + qname: ".", + outgoingBufsize: 512, + expectedErr: nil, + }, + } + + for i, tc := range tests { + req := new(dns.Msg) + req.SetQuestion(dns.Fqdn(tc.qname), dns.TypeA) + req.Question[0].Qclass = dns.ClassINET + em.Next = tc.next + + if tc.inputBufsize != 0 { + req.SetEdns0(tc.inputBufsize, false) + } + + _, err := em.ServeDNS(context.Background(), &test.ResponseWriter{}, req) + + if err != tc.expectedErr { + t.Errorf("Test %d: Expected error is %v, but got %v", i, tc.expectedErr, err) + } + + if tc.outgoingBufsize != 0 { + for _, extra := range req.Extra { + if option, ok := extra.(*dns.OPT); ok { + b := option.UDPSize() + if b != tc.outgoingBufsize { + t.Errorf("Test %d: Expected outgoing bufsize is %d, but got %d", i, tc.outgoingBufsize, b) + } + } else { + t.Errorf("Test %d: Not found OPT RR.", i) + } + } + } + } +} diff --git a/plugin/bufsize/setup.go b/plugin/bufsize/setup.go new file mode 100644 index 00000000000..28586c76ebc --- /dev/null +++ b/plugin/bufsize/setup.go @@ -0,0 +1,52 @@ +package bufsize + +import ( + "strconv" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + + "github.com/caddyserver/caddy" +) + +func init() { plugin.Register("bufsize", setup) } + +func setup(c *caddy.Controller) error { + bufsize, err := parse(c) + if err != nil { + return plugin.Error("bufsize", err) + } + + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + return Bufsize{Next: next, Size: bufsize} + }) + + return nil +} + +func parse(c *caddy.Controller) (int, error) { + const defaultBufSize = 512 + for c.Next() { + args := c.RemainingArgs() + switch len(args) { + case 0: + // Nothing specified; use 512 as default + return defaultBufSize, nil + case 1: + // Specified value is needed to verify + bufsize, err := strconv.Atoi(args[0]) + if err != nil { + return -1, plugin.Error("bufsize", c.ArgErr()) + } + // Follows RFC 6891 + if bufsize < 512 || bufsize > 4096 { + return -1, plugin.Error("bufsize", c.ArgErr()) + } + return bufsize, nil + default: + // Only 1 argument is 
+			return -1, plugin.Error("bufsize", c.ArgErr())
+		}
+	}
+	return -1, plugin.Error("bufsize", c.ArgErr())
+}
diff --git a/plugin/bufsize/setup_test.go b/plugin/bufsize/setup_test.go
new file mode 100644
index 00000000000..4d1705a0533
--- /dev/null
+++ b/plugin/bufsize/setup_test.go
@@ -0,0 +1,46 @@
+package bufsize
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/caddyserver/caddy"
+)
+
+func TestSetupBufsize(t *testing.T) {
+	tests := []struct {
+		input              string
+		shouldErr          bool
+		expectedData       int
+		expectedErrContent string // substring from the expected error. Empty for positive cases.
+	}{
+		{`bufsize`, false, 512, ""},
+		{`bufsize "1232"`, false, 1232, ""},
+		{`bufsize "5000"`, true, -1, "plugin"},
+		{`bufsize "512 512"`, true, -1, "plugin"},
+		{`bufsize "abc123"`, true, -1, "plugin"},
+	}
+
+	for i, test := range tests {
+		c := caddy.NewTestController("dns", test.input)
+		bufsize, err := parse(c)
+
+		if test.shouldErr && err == nil {
+			t.Errorf("Test %d: Expected error but found none for input %s", i, test.input)
+		}
+
+		if err != nil {
+			if !test.shouldErr {
+				t.Errorf("Test %d: Error found for input %s. Error: %v", i, test.input, err)
+			}
+
+			if !strings.Contains(err.Error(), test.expectedErrContent) {
+				t.Errorf("Test %d: Expected error to contain: %v, found error: %v, input: %s", i, test.expectedErrContent, err, test.input)
+			}
+		}
+
+		if !test.shouldErr && bufsize != test.expectedData {
+			t.Errorf("Test %d: Bufsize not correctly set for input %s. Expected: %d, actual: %d", i, test.input, test.expectedData, bufsize)
+		}
+	}
+}
diff --git a/plugin/cache/OWNERS b/plugin/cache/OWNERS
deleted file mode 100644
index 6b9f2f0df23..00000000000
--- a/plugin/cache/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-reviewers:
-  - grobie
-  - miekg
-approvers:
-  - grobie
-  - miekg
diff --git a/plugin/cache/README.md b/plugin/cache/README.md
index 8b2bdf075ca..14636e861bb 100644
--- a/plugin/cache/README.md
+++ b/plugin/cache/README.md
@@ -34,6 +34,7 @@ cache [TTL] [ZONES...] {
     success CAPACITY [TTL] [MINTTL]
     denial CAPACITY [TTL] [MINTTL]
     prefetch AMOUNT [[DURATION] [PERCENTAGE%]]
+    serve_stale [DURATION]
 }
 ~~~
 
@@ -50,6 +51,10 @@ cache [TTL] [ZONES...] {
   **DURATION** defaults to 1m. Prefetching will happen when the TTL drops below **PERCENTAGE**,
   which defaults to `10%`, or latest 1 second before TTL expiration. Values should be in the range
   `[10%, 90%]`. Note the percent sign is mandatory. **PERCENTAGE** is treated as an `int`.
+* `serve_stale`, when set, makes the cache always serve an expired entry to a client if there is one
+  available. When this happens, the cache attempts to refresh the entry after sending the expired
+  entry to the client. Such responses have a TTL of 0. **DURATION** is how far back to consider
+  stale responses as fresh. The default duration is 1h.
 
 ## Capacity and Eviction
 
@@ -63,12 +68,13 @@ Entries with 0 TTL will remain in the cache until randomly evicted when the shar
 
 ## Metrics
 
-If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported:
+If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported:
 
 * `coredns_cache_size{server, type}` - Total elements in the cache by cache type.
 * `coredns_cache_hits_total{server, type}` - Counter of cache hits by cache type.
 * `coredns_cache_misses_total{server}` - Counter of cache misses.
 * `coredns_cache_drops_total{server}` - Counter of dropped messages.
+* `coredns_cache_served_stale_total{server}` - Counter of requests served from stale cache entries. Cache types are either "denial" or "success". `Server` is the server handling the request, see the metrics plugin for documentation. @@ -93,13 +99,13 @@ Proxy to Google Public DNS and only cache responses for example.org (or below). } ~~~ -Enable caching for all zones, keep a positive cache size of 5000 and a negative cache size of 2500: +Enable caching for `example.org`, keep a positive cache size of 5000 and a negative cache size of 2500: ~~~ corefile - . { - cache { - success 5000 - denial 2500 +example.org { + cache { + success 5000 + denial 2500 } - } - ~~~ +} +~~~ diff --git a/plugin/cache/cache.go b/plugin/cache/cache.go index 69de55f17de..6b50c51cc67 100644 --- a/plugin/cache/cache.go +++ b/plugin/cache/cache.go @@ -15,7 +15,7 @@ import ( "github.com/miekg/dns" ) -// Cache is plugin that looks up responses in a cache and caches replies. +// Cache is a plugin that looks up responses in a cache and caches replies. // It has a success and a denial of existence cache. type Cache struct { Next plugin.Handler @@ -36,6 +36,8 @@ type Cache struct { duration time.Duration percentage int + staleUpTo time.Duration + // Testing. now func() time.Time } diff --git a/plugin/cache/cache_test.go b/plugin/cache/cache_test.go index 4afaf73c4da..138458c8f4c 100644 --- a/plugin/cache/cache_test.go +++ b/plugin/cache/cache_test.go @@ -2,10 +2,12 @@ package cache import ( "context" + "fmt" "testing" "time" "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/pkg/dnstest" "github.com/coredns/coredns/plugin/pkg/response" "github.com/coredns/coredns/plugin/test" "github.com/coredns/coredns/request" @@ -191,7 +193,7 @@ func TestCache(t *testing.T) { m := tc.in.Msg() m = cacheMsg(m, tc) - state := request.Request{W: nil, Req: m} + state := request.Request{W: &test.ResponseWriter{}, Req: m} mt, _ := response.Typify(m, utc) valid, k := key(state.Name(), m, mt, state.Do()) @@ -233,7 +235,7 @@ func TestCacheZeroTTL(t *testing.T) { c := New() c.minpttl = 0 c.minnttl = 0 - c.Next = zeroTTLBackend() + c.Next = ttlBackend(0) req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) @@ -248,6 +250,52 @@ func TestCacheZeroTTL(t *testing.T) { } } +func TestServeFromStaleCache(t *testing.T) { + c := New() + c.Next = ttlBackend(60) + + req := new(dns.Msg) + req.SetQuestion("cached.org.", dns.TypeA) + ctx := context.TODO() + + // Cache example.org. + rec := dnstest.NewRecorder(&test.ResponseWriter{}) + c.staleUpTo = 1 * time.Hour + c.ServeDNS(ctx, rec, req) + if c.pcache.Len() != 1 { + t.Fatalf("Msg with > 0 TTL should have been cached") + } + + // No more backend resolutions, just from cache if available. + c.Next = plugin.HandlerFunc(func(context.Context, dns.ResponseWriter, *dns.Msg) (int, error) { + return 255, nil // Below, a 255 means we tried querying upstream. 
+ }) + + tests := []struct { + name string + futureMinutes int + expectedResult int + }{ + {"cached.org.", 30, 0}, + {"cached.org.", 60, 0}, + {"cached.org.", 70, 255}, + + {"notcached.org.", 30, 255}, + {"notcached.org.", 60, 255}, + {"notcached.org.", 70, 255}, + } + + for i, tt := range tests { + rec := dnstest.NewRecorder(&test.ResponseWriter{}) + c.now = func() time.Time { return time.Now().Add(time.Duration(tt.futureMinutes) * time.Minute) } + r := req.Copy() + r.SetQuestion(tt.name, dns.TypeA) + if ret, _ := c.ServeDNS(ctx, rec, r); ret != tt.expectedResult { + t.Errorf("Test %d: expecting %v; got %v", i, tt.expectedResult, ret) + } + } +} + func BenchmarkCacheResponse(b *testing.B) { c := New() c.prefetch = 1 @@ -286,13 +334,13 @@ func BackendHandler() plugin.Handler { }) } -func zeroTTLBackend() plugin.Handler { +func ttlBackend(ttl int) plugin.Handler { return plugin.HandlerFunc(func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { m := new(dns.Msg) m.SetReply(r) m.Response, m.RecursionAvailable = true, true - m.Answer = []dns.RR{test.A("example.org. 0 IN A 127.0.0.53")} + m.Answer = []dns.RR{test.A(fmt.Sprintf("example.org. %d IN A 127.0.0.53", ttl))} w.WriteMsg(m) return dns.RcodeSuccess, nil }) diff --git a/plugin/cache/fuzz.go b/plugin/cache/fuzz.go index 9bf6cb3a9d0..18e98fa9f2d 100644 --- a/plugin/cache/fuzz.go +++ b/plugin/cache/fuzz.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package cache diff --git a/plugin/cache/handler.go b/plugin/cache/handler.go index 2d608e8d353..905a98ef491 100644 --- a/plugin/cache/handler.go +++ b/plugin/cache/handler.go @@ -26,37 +26,55 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) server := metrics.WithServer(ctx) - i, found := c.get(now, state, server) - if i != nil && found { - resp := i.toMsg(r, now) - - w.WriteMsg(resp) - - if c.prefetch > 0 { - ttl := i.ttl(now) - i.Freq.Update(c.duration, now) - - threshold := int(math.Ceil(float64(c.percentage) / 100 * float64(i.origTTL))) - if i.Freq.Hits() >= c.prefetch && ttl <= threshold { - cw := newPrefetchResponseWriter(server, state, c) - go func(w dns.ResponseWriter) { - cachePrefetches.WithLabelValues(server).Inc() - plugin.NextOrFailure(c.Name(), c.Next, ctx, w, r) - - // When prefetching we loose the item i, and with it the frequency - // that we've gathered sofar. See we copy the frequencies info back - // into the new item that was stored in the cache. - if i1 := c.exists(state); i1 != nil { - i1.Freq.Reset(now, i.Freq.Hits()) - } - }(cw) - } - } - return dns.RcodeSuccess, nil + ttl := 0 + i := c.getIgnoreTTL(now, state, server) + if i != nil { + ttl = i.ttl(now) + } + if i == nil || -ttl >= int(c.staleUpTo.Seconds()) { + crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server} + return plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r) } + if ttl < 0 { + servedStale.WithLabelValues(server).Inc() + // Adjust the time to get a 0 TTL in the reply built from a stale item. 
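+		// (ttl is negative here, so adding it moves `now` back just enough
+		// that the remaining TTL computed from the stale item comes out as 0.)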
+		now = now.Add(time.Duration(ttl) * time.Second)
+		go func() {
+			r := r.Copy()
+			crr := &ResponseWriter{Cache: c, state: state, server: server, prefetch: true, remoteAddr: w.LocalAddr()}
+			plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r)
+		}()
+	}
+	resp := i.toMsg(r, now)
+	w.WriteMsg(resp)
 
-	crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server}
-	return plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r)
+	if c.shouldPrefetch(i, now) {
+		go c.doPrefetch(ctx, state, server, i, now)
+	}
+	return dns.RcodeSuccess, nil
+}
+
+func (c *Cache) doPrefetch(ctx context.Context, state request.Request, server string, i *item, now time.Time) {
+	cw := newPrefetchResponseWriter(server, state, c)
+
+	cachePrefetches.WithLabelValues(server).Inc()
+	plugin.NextOrFailure(c.Name(), c.Next, ctx, cw, state.Req)
+
+	// When prefetching we lose the item i, and with it the frequency
+	// that we've gathered so far. So we copy the frequency info back
+	// into the new item that was stored in the cache.
+	if i1 := c.exists(state); i1 != nil {
+		i1.Freq.Reset(now, i.Freq.Hits())
+	}
+}
+
+func (c *Cache) shouldPrefetch(i *item, now time.Time) bool {
+	if c.prefetch <= 0 {
+		return false
+	}
+	i.Freq.Update(c.duration, now)
+	threshold := int(math.Ceil(float64(c.percentage) / 100 * float64(i.origTTL)))
+	return i.Freq.Hits() >= c.prefetch && i.ttl(now) <= threshold
+}
 
 // Name implements the Handler interface.
@@ -78,6 +96,27 @@ func (c *Cache) get(now time.Time, state request.Request, server string) (*item,
 	return nil, false
 }
 
+// getIgnoreTTL unconditionally returns an item if it exists in the cache.
+func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string) *item {
+	k := hash(state.Name(), state.QType(), state.Do())
+
+	if i, ok := c.ncache.Get(k); ok {
+		ttl := i.(*item).ttl(now)
+		if ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds())) {
+			cacheHits.WithLabelValues(server, Denial).Inc()
+		}
+		return i.(*item)
+	}
+	if i, ok := c.pcache.Get(k); ok {
+		ttl := i.(*item).ttl(now)
+		if ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds())) {
+			cacheHits.WithLabelValues(server, Success).Inc()
+		}
+		return i.(*item)
+	}
+	return nil
+}
+
 func (c *Cache) exists(state request.Request) *item {
 	k := hash(state.Name(), state.QType(), state.Do())
 	if i, ok := c.ncache.Get(k); ok {
@@ -124,4 +163,11 @@ var (
 		Name:      "drops_total",
 		Help:      "The number responses that are not cached, because the reply is malformed.",
 	}, []string{"server"})
+
+	servedStale = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: plugin.Namespace,
+		Subsystem: "cache",
+		Name:      "served_stale_total",
+		Help:      "The number of requests served from stale cache entries.",
+	}, []string{"server"})
 )
diff --git a/plugin/cache/item.go b/plugin/cache/item.go
index edc7610a41e..3071f1512e0 100644
--- a/plugin/cache/item.go
+++ b/plugin/cache/item.go
@@ -54,7 +54,7 @@ func (i *item) toMsg(m *dns.Msg, now time.Time) *dns.Msg {
 	m1 := new(dns.Msg)
 	m1.SetReply(m)
 
-	// Set this to true as some DNS clients disgard the *entire* packet when it's non-authoritative.
+	// Set this to true as some DNS clients discard the *entire* packet when it's non-authoritative.
 	// This is probably not according to spec, but the bit itself is not super useful as this point, so
 	// just set it to true.
m1.Authoritative = true diff --git a/plugin/cache/setup.go b/plugin/cache/setup.go index 3bffe8581f4..62c5c9d2c74 100644 --- a/plugin/cache/setup.go +++ b/plugin/cache/setup.go @@ -1,6 +1,7 @@ package cache import ( + "errors" "fmt" "strconv" "time" @@ -16,12 +17,7 @@ import ( var log = clog.NewWithPlugin("cache") -func init() { - caddy.RegisterPlugin("cache", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("cache", setup) } func setup(c *caddy.Controller) error { ca, err := cacheParse(c) @@ -36,7 +32,7 @@ func setup(c *caddy.Controller) error { c.OnStartup(func() error { metrics.MustRegister(c, cacheSize, cacheHits, cacheMisses, - cachePrefetches, cacheDrops) + cachePrefetches, cacheDrops, servedStale) return nil }) @@ -181,6 +177,22 @@ func cacheParse(c *caddy.Controller) (*Cache, error) { ca.percentage = num } + case "serve_stale": + args := c.RemainingArgs() + if len(args) > 1 { + return nil, c.ArgErr() + } + ca.staleUpTo = 1 * time.Hour + if len(args) == 1 { + d, err := time.ParseDuration(args[0]) + if err != nil { + return nil, err + } + if d < 0 { + return nil, errors.New("invalid negative duration for serve_stale") + } + ca.staleUpTo = d + } default: return nil, c.ArgErr() } diff --git a/plugin/cache/setup_test.go b/plugin/cache/setup_test.go index 975520d316b..6352bcadbf3 100644 --- a/plugin/cache/setup_test.go +++ b/plugin/cache/setup_test.go @@ -1,6 +1,7 @@ package cache import ( + "fmt" "testing" "time" @@ -113,3 +114,39 @@ func TestSetup(t *testing.T) { } } } + +func TestServeStale(t *testing.T) { + tests := []struct { + input string + shouldErr bool + staleUpTo time.Duration + }{ + {"serve_stale", false, 1 * time.Hour}, + {"serve_stale 20m", false, 20 * time.Minute}, + {"serve_stale 1h20m", false, 80 * time.Minute}, + {"serve_stale 0m", false, 0}, + {"serve_stale 0", false, 0}, + // fails + {"serve_stale 20", true, 0}, + {"serve_stale -20m", true, 0}, + {"serve_stale aa", true, 0}, + {"serve_stale 1m nono", true, 0}, + } + for i, test := range tests { + c := caddy.NewTestController("dns", fmt.Sprintf("cache {\n%s\n}", test.input)) + ca, err := cacheParse(c) + if test.shouldErr && err == nil { + t.Errorf("Test %v: Expected error but found nil", i) + continue + } else if !test.shouldErr && err != nil { + t.Errorf("Test %v: Expected no error but found error: %v", i, err) + continue + } + if test.shouldErr && err != nil { + continue + } + if ca.staleUpTo != test.staleUpTo { + t.Errorf("Test %v: Expected stale %v but found: %v", i, test.staleUpTo, ca.staleUpTo) + } + } +} diff --git a/plugin/cancel/OWNERS b/plugin/cancel/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/cancel/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/cancel/README.md b/plugin/cancel/README.md index 1615f640944..8561b1f6b86 100644 --- a/plugin/cancel/README.md +++ b/plugin/cancel/README.md @@ -2,14 +2,14 @@ ## Name -*cancel* - a plugin that cancels a request's context after 5001 milliseconds. +*cancel* - cancels a request's context after 5001 milliseconds. ## Description The *cancel* plugin creates a canceling context for each request. It adds a timeout that gets triggered after 5001 milliseconds. -The 5001 number is chosen because the default timeout for DNS clients is 5 seconds, after that they +The 5001 number was chosen because the default timeout for DNS clients is 5 seconds, after that they give up. 
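+
+As a sketch, a downstream handler (the handler name here is made up) can observe
+that timeout roughly like this:
+
+~~~ go
+func (h exampleHandler) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
+	if plugin.Done(ctx) {
+		// The cancel timeout has fired; stop instead of doing more work.
+		return dns.RcodeServerFailure, ctx.Err()
+	}
+	return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r)
+}
+~~~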
A plugin interested in the cancellation status should call `plugin.Done()` on the context. If the @@ -25,7 +25,7 @@ cancel [TIMEOUT] ## Examples ~~~ corefile -. { +example.org { cancel whoami } @@ -34,7 +34,7 @@ cancel [TIMEOUT] Or with a custom timeout: ~~~ corefile -. { +example.org { cancel 1s whoami } diff --git a/plugin/cancel/cancel.go b/plugin/cancel/cancel.go index 10efabfea5b..9ded73e82df 100644 --- a/plugin/cancel/cancel.go +++ b/plugin/cancel/cancel.go @@ -13,21 +13,16 @@ import ( "github.com/miekg/dns" ) -func init() { - caddy.RegisterPlugin("cancel", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("cancel", setup) } func setup(c *caddy.Controller) error { - ca := Cancel{timeout: 5001 * time.Millisecond} + ca := Cancel{} for c.Next() { args := c.RemainingArgs() switch len(args) { case 0: - break + ca.timeout = 5001 * time.Millisecond case 1: dur, err := time.ParseDuration(args[0]) if err != nil { diff --git a/plugin/cancel/cancel_test.go b/plugin/cancel/cancel_test.go index ceba9f5d277..f775518097a 100644 --- a/plugin/cancel/cancel_test.go +++ b/plugin/cancel/cancel_test.go @@ -34,7 +34,6 @@ func (s sleepPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns. return 0, nil } } - return 0, nil } func TestCancel(t *testing.T) { diff --git a/plugin/chaos/OWNERS b/plugin/chaos/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/chaos/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/chaos/README.md b/plugin/chaos/README.md index 3cd6dff41dc..9ce5216ae68 100644 --- a/plugin/chaos/README.md +++ b/plugin/chaos/README.md @@ -7,7 +7,7 @@ ## Description This is useful for retrieving version or author information from the server by querying a TXT record -for a special domainname in the CH class. +for a special domain name in the CH class. ## Syntax diff --git a/plugin/chaos/fuzz.go b/plugin/chaos/fuzz.go index f0e23b0831c..53667f2cd84 100644 --- a/plugin/chaos/fuzz.go +++ b/plugin/chaos/fuzz.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package chaos diff --git a/plugin/chaos/setup.go b/plugin/chaos/setup.go index 33eba8a5002..42ce76b4f3e 100644 --- a/plugin/chaos/setup.go +++ b/plugin/chaos/setup.go @@ -11,13 +11,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("chaos", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) - -} +func init() { plugin.Register("chaos", setup) } func setup(c *caddy.Controller) error { version, authors, err := parse(c) diff --git a/plugin/chaos/zowners.go b/plugin/chaos/zowners.go index 42bdf871b4c..60645e53ed5 100644 --- a/plugin/chaos/zowners.go +++ b/plugin/chaos/zowners.go @@ -1,4 +1,5 @@ package chaos // Owners are all GitHub handlers of all maintainers. 
-var Owners = []string{"Miciah", "bradbeam", "chrisohaver", "danehans", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "inigohu", "ironcladlou", "isolus", "johnbelamaric", "knobunc", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "smarterclayton", "superq", "varyoo", "yongtang"}
\ No newline at end of file
+var Owners = []string{"Miciah", "bradbeam", "chrisohaver", "danehans", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "inigohu", "ironcladlou", "isolus", "johnbelamaric", "knobunc", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "smarterclayton", "superq", "varyoo", "yongtang"}
+
diff --git a/plugin/clouddns/README.md b/plugin/clouddns/README.md
new file mode 100644
index 00000000000..9ae6d52e086
--- /dev/null
+++ b/plugin/clouddns/README.md
@@ -0,0 +1,74 @@
+# clouddns
+
+## Name
+
+*clouddns* - enables serving zone data from GCP Cloud DNS.
+
+## Description
+
+The *clouddns* plugin is useful for serving zones from resource record
+sets in GCP Cloud DNS. This plugin supports all [Google Cloud DNS
+records](https://cloud.google.com/dns/docs/overview#supported_dns_record_types). This plugin can
+be used when CoreDNS is deployed on GCP or elsewhere. Note that this plugin accesses the resource
+records through the Google Cloud API. For records in a privately hosted zone, it is not necessary to
+place CoreDNS and this plugin in the associated VPC network. In fact, the private hosted zone could
+be created without any associated VPC and this plugin could still access the resource records under
+the hosted zone.
+
+## Syntax
+
+~~~ txt
+clouddns [ZONE:PROJECT_ID:HOSTED_ZONE_NAME...] {
+    credentials [FILENAME]
+    fallthrough [ZONES...]
+}
+~~~
+
+* **ZONE** the name of the domain to be accessed. When there are multiple zones with overlapping
+  domains (private vs. public hosted zone), CoreDNS does the lookup in the given order here.
+  Therefore, for a non-existing resource record, the SOA response will come from the rightmost zone.
+
+* **PROJECT_ID** the project ID of the Google Cloud project.
+
+* **HOSTED_ZONE_NAME** the name of the hosted zone that contains the resource record sets to be
+  accessed.
+
+* `credentials` is used for reading the credential file.
+
+* **FILENAME** GCP credentials file path (normally a .json file).
+
+* `fallthrough` If zone matches and no record can be generated, pass request to the next plugin.
+  If **[ZONES...]** is omitted, then fallthrough happens for all zones for which the plugin is
+  authoritative. If specific zones are listed (for example `in-addr.arpa` and `ip6.arpa`), then
+  only queries for those zones will be subject to fallthrough.
+
+* **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block are used.
+
+## Examples
+
+Enable clouddns with implicit GCP credentials and resolve CNAMEs via 10.0.0.1:
+
+~~~ txt
+example.org {
+    clouddns example.org.:gcp-example-project:example-zone
+    forward . 10.0.0.1
+}
+~~~
+
+Enable clouddns with fallthrough:
+
+~~~ txt
+example.org {
+    clouddns example.org.:gcp-example-project:example-zone example.com.:gcp-example-project:example-zone-2 {
+      fallthrough example.gov.
+    }
+}
+~~~
+
+Enable clouddns with multiple hosted zones with the same domain:
+
+~~~ txt
+. 
{ + clouddns example.org.:gcp-example-project:example-zone example.com.:gcp-example-project:other-example-zone +} +~~~ diff --git a/plugin/clouddns/clouddns.go b/plugin/clouddns/clouddns.go new file mode 100644 index 00000000000..3546a4fea3e --- /dev/null +++ b/plugin/clouddns/clouddns.go @@ -0,0 +1,222 @@ +// Package clouddns implements a plugin that returns resource records +// from GCP Cloud DNS. +package clouddns + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/pkg/fall" + "github.com/coredns/coredns/plugin/pkg/upstream" + "github.com/coredns/coredns/request" + + "github.com/miekg/dns" + gcp "google.golang.org/api/dns/v1" +) + +// CloudDNS is a plugin that returns RR from GCP Cloud DNS. +type CloudDNS struct { + Next plugin.Handler + Fall fall.F + + zoneNames []string + client gcpDNS + upstream *upstream.Upstream + + zMu sync.RWMutex + zones zones +} + +type zone struct { + projectName string + zoneName string + z *file.Zone + dns string +} + +type zones map[string][]*zone + +// New reads from the keys map which uses domain names as its key and a colon separated +// string of project name and hosted zone name lists as its values, validates +// that each domain name/zone id pair does exist, and returns a new *CloudDNS. +// In addition to this, upstream is passed for doing recursive queries against CNAMEs. +// Returns error if it cannot verify any given domain name/zone id pair. +func New(ctx context.Context, c gcpDNS, keys map[string][]string, up *upstream.Upstream) (*CloudDNS, error) { + zones := make(map[string][]*zone, len(keys)) + zoneNames := make([]string, 0, len(keys)) + for dnsName, hostedZoneDetails := range keys { + for _, hostedZone := range hostedZoneDetails { + ss := strings.SplitN(hostedZone, ":", 2) + if len(ss) != 2 { + return nil, errors.New("either project or zone name missing") + } + err := c.zoneExists(ss[0], ss[1]) + if err != nil { + return nil, err + } + fqdnDNSName := dns.Fqdn(dnsName) + if _, ok := zones[fqdnDNSName]; !ok { + zoneNames = append(zoneNames, fqdnDNSName) + } + zones[fqdnDNSName] = append(zones[fqdnDNSName], &zone{projectName: ss[0], zoneName: ss[1], dns: fqdnDNSName, z: file.NewZone(fqdnDNSName, "")}) + } + } + return &CloudDNS{ + client: c, + zoneNames: zoneNames, + zones: zones, + upstream: up, + }, nil +} + +// Run executes first update, spins up an update forever-loop. +// Returns error if first update fails. +func (h *CloudDNS) Run(ctx context.Context) error { + if err := h.updateZones(ctx); err != nil { + return err + } + go func() { + for { + select { + case <-ctx.Done(): + log.Infof("Breaking out of CloudDNS update loop: %v", ctx.Err()) + return + case <-time.After(1 * time.Minute): + if err := h.updateZones(ctx); err != nil && ctx.Err() == nil /* Don't log error if ctx expired. */ { + log.Errorf("Failed to update zones: %v", err) + } + } + } + }() + return nil +} + +// ServeDNS implements the plugin.Handler interface. 
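+// The query name is matched against the configured zone names; on a match each
+// hosted zone's in-memory copy is consulted under a read lock. If no answer is
+// found the request falls through to the next plugin, but only when
+// fallthrough applies to the queried zone.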
+func (h *CloudDNS) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + qname := state.Name() + + zName := plugin.Zones(h.zoneNames).Matches(qname) + if zName == "" { + return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) + } + + z, ok := h.zones[zName] // ok true if we are authoritative for the zone + if !ok || z == nil { + return dns.RcodeServerFailure, nil + } + + m := new(dns.Msg) + m.SetReply(r) + m.Authoritative = true + var result file.Result + + for _, hostedZone := range z { + h.zMu.RLock() + m.Answer, m.Ns, m.Extra, result = hostedZone.z.Lookup(ctx, state, qname) + h.zMu.RUnlock() + + // Take the answer if it's non-empty OR if there is another + // record type exists for this name (NODATA). + if len(m.Answer) != 0 || result == file.NoData { + break + } + } + + if len(m.Answer) == 0 && result != file.NoData && h.Fall.Through(qname) { + return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) + } + + switch result { + case file.Success: + case file.NoData: + case file.NameError: + m.Rcode = dns.RcodeNameError + case file.Delegation: + m.Authoritative = false + case file.ServerFailure: + return dns.RcodeServerFailure, nil + } + + w.WriteMsg(m) + return dns.RcodeSuccess, nil +} + +func updateZoneFromRRS(rrs *gcp.ResourceRecordSetsListResponse, z *file.Zone) error { + for _, rr := range rrs.Rrsets { + var rfc1035 string + var r dns.RR + var err error + for _, value := range rr.Rrdatas { + if rr.Type == "CNAME" || rr.Type == "PTR" { + value = dns.Fqdn(value) + } + + // Assemble RFC 1035 conforming record to pass into dns scanner. + rfc1035 = fmt.Sprintf("%s %d IN %s %s", dns.Fqdn(rr.Name), rr.Ttl, rr.Type, value) + r, err = dns.NewRR(rfc1035) + if err != nil { + return fmt.Errorf("failed to parse resource record: %v", err) + } + } + + z.Insert(r) + } + return nil +} + +// updateZones re-queries resource record sets for each zone and updates the +// zone object. +// Returns error if any zones error'ed out, but waits for other zones to +// complete first. +func (h *CloudDNS) updateZones(ctx context.Context) error { + errc := make(chan error) + defer close(errc) + for zName, z := range h.zones { + go func(zName string, z []*zone) { + var err error + var rrListResponse *gcp.ResourceRecordSetsListResponse + defer func() { + errc <- err + }() + + for i, hostedZone := range z { + newZ := file.NewZone(zName, "") + newZ.Upstream = h.upstream + rrListResponse, err = h.client.listRRSets(hostedZone.projectName, hostedZone.zoneName) + if err != nil { + err = fmt.Errorf("failed to list resource records for %v:%v:%v from gcp: %v", zName, hostedZone.projectName, hostedZone.zoneName, err) + return + } + updateZoneFromRRS(rrListResponse, newZ) + + h.zMu.Lock() + (*z[i]).z = newZ + h.zMu.Unlock() + } + + }(zName, z) + } + // Collect errors (if any). This will also sync on all zones updates + // completion. + var errs []string + for i := 0; i < len(h.zones); i++ { + err := <-errc + if err != nil { + errs = append(errs, err.Error()) + } + } + if len(errs) != 0 { + return fmt.Errorf("errors updating zones: %v", errs) + } + return nil +} + +// Name implements the Handler interface. 
+func (h *CloudDNS) Name() string { return "clouddns" } diff --git a/plugin/clouddns/clouddns_test.go b/plugin/clouddns/clouddns_test.go new file mode 100644 index 00000000000..dafd65bba56 --- /dev/null +++ b/plugin/clouddns/clouddns_test.go @@ -0,0 +1,316 @@ +package clouddns + +import ( + "context" + "errors" + "reflect" + "testing" + + "github.com/coredns/coredns/plugin/pkg/dnstest" + "github.com/coredns/coredns/plugin/pkg/fall" + "github.com/coredns/coredns/plugin/pkg/upstream" + "github.com/coredns/coredns/plugin/test" + crequest "github.com/coredns/coredns/request" + + "github.com/miekg/dns" + gcp "google.golang.org/api/dns/v1" +) + +type fakeGCPClient struct { + *gcp.Service +} + +func (c fakeGCPClient) zoneExists(projectName, hostedZoneName string) error { + return nil +} + +func (c fakeGCPClient) listRRSets(projectName, hostedZoneName string) (*gcp.ResourceRecordSetsListResponse, error) { + if projectName == "bad-project" || hostedZoneName == "bad-zone" { + return nil, errors.New("the 'parameters.managedZone' resource named 'bad-zone' does not exist") + } + + var rr []*gcp.ResourceRecordSet + + if hostedZoneName == "sample-zone-1" { + rr = []*gcp.ResourceRecordSet{ + { + Name: "example.org.", + Ttl: 300, + Type: "A", + Rrdatas: []string{"1.2.3.4"}, + }, + { + Name: "www.example.org", + Ttl: 300, + Type: "A", + Rrdatas: []string{"1.2.3.4"}, + }, + { + Name: "*.www.example.org", + Ttl: 300, + Type: "CNAME", + Rrdatas: []string{"www.example.org"}, + }, + { + Name: "example.org.", + Ttl: 300, + Type: "AAAA", + Rrdatas: []string{"2001:db8:85a3::8a2e:370:7334"}, + }, + { + Name: "sample.example.org", + Ttl: 300, + Type: "CNAME", + Rrdatas: []string{"example.org"}, + }, + { + Name: "example.org.", + Ttl: 300, + Type: "PTR", + Rrdatas: []string{"ptr.example.org."}, + }, + { + Name: "org.", + Ttl: 300, + Type: "SOA", + Rrdatas: []string{"ns-cloud-c1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + { + Name: "com.", + Ttl: 300, + Type: "NS", + Rrdatas: []string{"ns-cloud-c4.googledomains.com."}, + }, + { + Name: "split-example.gov.", + Ttl: 300, + Type: "A", + Rrdatas: []string{"1.2.3.4"}, + }, + { + Name: "swag.", + Ttl: 300, + Type: "YOLO", + Rrdatas: []string{"foobar"}, + }, + } + } else { + rr = []*gcp.ResourceRecordSet{ + { + Name: "split-example.org.", + Ttl: 300, + Type: "A", + Rrdatas: []string{"1.2.3.4"}, + }, + { + Name: "other-example.org.", + Ttl: 300, + Type: "A", + Rrdatas: []string{"3.5.7.9"}, + }, + { + Name: "org.", + Ttl: 300, + Type: "SOA", + Rrdatas: []string{"ns-cloud-e1.googledomains.com. cloud-dns-hostmaster.google.com. 
1 21600 300 259200 300"}, + }, + } + } + + return &gcp.ResourceRecordSetsListResponse{Rrsets: rr}, nil +} + +func TestCloudDNS(t *testing.T) { + ctx := context.Background() + + r, err := New(ctx, fakeGCPClient{}, map[string][]string{"bad.": {"bad-project:bad-zone"}}, &upstream.Upstream{}) + if err != nil { + t.Fatalf("Failed to create Cloud DNS: %v", err) + } + if err = r.Run(ctx); err == nil { + t.Fatalf("Expected errors for zone bad.") + } + + r, err = New(ctx, fakeGCPClient{}, map[string][]string{"org.": {"sample-project-1:sample-zone-2", "sample-project-1:sample-zone-1"}, "gov.": {"sample-project-1:sample-zone-2", "sample-project-1:sample-zone-1"}}, &upstream.Upstream{}) + if err != nil { + t.Fatalf("Failed to create Cloud DNS: %v", err) + } + r.Fall = fall.Zero + r.Fall.SetZonesFromArgs([]string{"gov."}) + r.Next = test.HandlerFunc(func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := crequest.Request{W: w, Req: r} + qname := state.Name() + m := new(dns.Msg) + rcode := dns.RcodeServerFailure + if qname == "example.gov." { + m.SetReply(r) + rr, err := dns.NewRR("example.gov. 300 IN A 2.4.6.8") + if err != nil { + t.Fatalf("Failed to create Resource Record: %v", err) + } + m.Answer = []dns.RR{rr} + + m.Authoritative = true + rcode = dns.RcodeSuccess + + } + + m.SetRcode(r, rcode) + w.WriteMsg(m) + return rcode, nil + }) + err = r.Run(ctx) + if err != nil { + t.Fatalf("Failed to initialize Cloud DNS: %v", err) + } + + tests := []struct { + qname string + qtype uint16 + wantRetCode int + wantAnswer []string // ownernames for the records in the additional section. + wantMsgRCode int + wantNS []string + expectedErr error + }{ + // 0. example.org A found - success. + { + qname: "example.org", + qtype: dns.TypeA, + wantAnswer: []string{"example.org. 300 IN A 1.2.3.4"}, + }, + // 1. example.org AAAA found - success. + { + qname: "example.org", + qtype: dns.TypeAAAA, + wantAnswer: []string{"example.org. 300 IN AAAA 2001:db8:85a3::8a2e:370:7334"}, + }, + // 2. exampled.org PTR found - success. + { + qname: "example.org", + qtype: dns.TypePTR, + wantAnswer: []string{"example.org. 300 IN PTR ptr.example.org."}, + }, + // 3. sample.example.org points to example.org CNAME. + // Query must return both CNAME and A recs. + { + qname: "sample.example.org", + qtype: dns.TypeA, + wantAnswer: []string{ + "sample.example.org. 300 IN CNAME example.org.", + "example.org. 300 IN A 1.2.3.4", + }, + }, + // 4. Explicit CNAME query for sample.example.org. + // Query must return just CNAME. + { + qname: "sample.example.org", + qtype: dns.TypeCNAME, + wantAnswer: []string{"sample.example.org. 300 IN CNAME example.org."}, + }, + // 5. Explicit SOA query for example.org. + { + qname: "example.org", + qtype: dns.TypeSOA, + wantAnswer: []string{"org. 300 IN SOA ns-cloud-e1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + // 6. Explicit SOA query for example.org. + { + qname: "example.org", + qtype: dns.TypeNS, + wantNS: []string{"org. 300 IN SOA ns-cloud-c1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + // 7. AAAA query for split-example.org must return NODATA. + { + qname: "split-example.gov", + qtype: dns.TypeAAAA, + wantRetCode: dns.RcodeSuccess, + wantNS: []string{"org. 300 IN SOA ns-cloud-c1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + // 8. Zone not configured. 
+ { + qname: "badexample.com", + qtype: dns.TypeA, + wantRetCode: dns.RcodeServerFailure, + wantMsgRCode: dns.RcodeServerFailure, + }, + // 9. No record found. Return SOA record. + { + qname: "bad.org", + qtype: dns.TypeA, + wantRetCode: dns.RcodeSuccess, + wantMsgRCode: dns.RcodeNameError, + wantNS: []string{"org. 300 IN SOA ns-cloud-c1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + // 10. No record found. Fallthrough. + { + qname: "example.gov", + qtype: dns.TypeA, + wantAnswer: []string{"example.gov. 300 IN A 2.4.6.8"}, + }, + // 11. other-zone.example.org is stored in a different hosted zone. success + { + qname: "other-example.org", + qtype: dns.TypeA, + wantAnswer: []string{"other-example.org. 300 IN A 3.5.7.9"}, + }, + // 12. split-example.org only has A record. Expect NODATA. + { + qname: "split-example.org", + qtype: dns.TypeAAAA, + wantNS: []string{"org. 300 IN SOA ns-cloud-e1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + // 13. *.www.example.org is a wildcard CNAME to www.example.org. + { + qname: "a.www.example.org", + qtype: dns.TypeA, + wantAnswer: []string{ + "a.www.example.org. 300 IN CNAME www.example.org.", + "www.example.org. 300 IN A 1.2.3.4", + }, + }, + } + + for ti, tc := range tests { + req := new(dns.Msg) + req.SetQuestion(dns.Fqdn(tc.qname), tc.qtype) + + rec := dnstest.NewRecorder(&test.ResponseWriter{}) + code, err := r.ServeDNS(ctx, rec, req) + + if err != tc.expectedErr { + t.Fatalf("Test %d: Expected error %v, but got %v", ti, tc.expectedErr, err) + } + if code != int(tc.wantRetCode) { + t.Fatalf("Test %d: Expected returned status code %s, but got %s", ti, dns.RcodeToString[tc.wantRetCode], dns.RcodeToString[code]) + } + + if tc.wantMsgRCode != rec.Msg.Rcode { + t.Errorf("Test %d: Unexpected msg status code. Want: %s, got: %s", ti, dns.RcodeToString[tc.wantMsgRCode], dns.RcodeToString[rec.Msg.Rcode]) + } + + if len(tc.wantAnswer) != len(rec.Msg.Answer) { + t.Errorf("Test %d: Unexpected number of Answers. Want: %d, got: %d", ti, len(tc.wantAnswer), len(rec.Msg.Answer)) + } else { + for i, gotAnswer := range rec.Msg.Answer { + if gotAnswer.String() != tc.wantAnswer[i] { + t.Errorf("Test %d: Unexpected answer.\nWant:\n\t%s\nGot:\n\t%s", ti, tc.wantAnswer[i], gotAnswer) + } + } + } + + if len(tc.wantNS) != len(rec.Msg.Ns) { + t.Errorf("Test %d: Unexpected NS number. Want: %d, got: %d", ti, len(tc.wantNS), len(rec.Msg.Ns)) + } else { + for i, ns := range rec.Msg.Ns { + got, ok := ns.(*dns.SOA) + if !ok { + t.Errorf("Test %d: Unexpected NS type. Want: SOA, got: %v", ti, reflect.TypeOf(got)) + } + if got.String() != tc.wantNS[i] { + t.Errorf("Test %d: Unexpected NS.\nWant: %v\nGot: %v", ti, tc.wantNS[i], got) + } + } + } + } +} diff --git a/plugin/clouddns/gcp.go b/plugin/clouddns/gcp.go new file mode 100644 index 00000000000..6d9d85d432c --- /dev/null +++ b/plugin/clouddns/gcp.go @@ -0,0 +1,32 @@ +package clouddns + +import gcp "google.golang.org/api/dns/v1" + +type gcpDNS interface { + zoneExists(projectName, hostedZoneName string) error + listRRSets(projectName, hostedZoneName string) (*gcp.ResourceRecordSetsListResponse, error) +} + +type gcpClient struct { + *gcp.Service +} + +// zoneExists is a wrapper method around `gcp.Service.ManagedZones.Get` +// it checks if the provided zone name for a given project exists. 
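+// A nil return means the project/zone pair exists and is visible to the
+// configured credentials; any error from the Google API is passed through.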
+func (c gcpClient) zoneExists(projectName, hostedZoneName string) error { + _, err := c.ManagedZones.Get(projectName, hostedZoneName).Do() + if err != nil { + return err + } + return nil +} + +// listRRSets is a wrapper method around `gcp.Service.ResourceRecordSets.List` +// it fetches and returns the record sets for a hosted zone. +func (c gcpClient) listRRSets(projectName, hostedZoneName string) (*gcp.ResourceRecordSetsListResponse, error) { + rr, err := c.ResourceRecordSets.List(projectName, hostedZoneName).Do() + if err != nil { + return nil, err + } + return rr, nil +} diff --git a/plugin/clouddns/log_test.go b/plugin/clouddns/log_test.go new file mode 100644 index 00000000000..148635b4be8 --- /dev/null +++ b/plugin/clouddns/log_test.go @@ -0,0 +1,5 @@ +package clouddns + +import clog "github.com/coredns/coredns/plugin/pkg/log" + +func init() { clog.Discard() } diff --git a/plugin/clouddns/setup.go b/plugin/clouddns/setup.go new file mode 100644 index 00000000000..7de8efe8986 --- /dev/null +++ b/plugin/clouddns/setup.go @@ -0,0 +1,105 @@ +package clouddns + +import ( + "context" + "strings" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/pkg/fall" + clog "github.com/coredns/coredns/plugin/pkg/log" + "github.com/coredns/coredns/plugin/pkg/upstream" + + "github.com/caddyserver/caddy" + gcp "google.golang.org/api/dns/v1" + "google.golang.org/api/option" +) + +var log = clog.NewWithPlugin("clouddns") + +func init() { plugin.Register("clouddns", setup) } + +// exposed for testing +var f = func(ctx context.Context, opt option.ClientOption) (gcpDNS, error) { + var err error + var client *gcp.Service + if opt != nil { + client, err = gcp.NewService(ctx, opt) + } else { + // if credentials file is not provided in the Corefile + // authenticate the client using env variables + client, err = gcp.NewService(ctx) + } + return gcpClient{client}, err +} + +func setup(c *caddy.Controller) error { + for c.Next() { + keyPairs := map[string]struct{}{} + keys := map[string][]string{} + + var fall fall.F + up := upstream.New() + + args := c.RemainingArgs() + + for i := 0; i < len(args); i++ { + parts := strings.SplitN(args[i], ":", 3) + if len(parts) != 3 { + return c.Errf("invalid zone '%s'", args[i]) + } + dnsName, projectName, hostedZone := parts[0], parts[1], parts[2] + if dnsName == "" || projectName == "" || hostedZone == "" { + return c.Errf("invalid zone '%s'", args[i]) + } + if _, ok := keyPairs[args[i]]; ok { + return c.Errf("conflict zone '%s'", args[i]) + } + + keyPairs[args[i]] = struct{}{} + keys[dnsName] = append(keys[dnsName], projectName+":"+hostedZone) + } + + var opt option.ClientOption + for c.NextBlock() { + switch c.Val() { + case "upstream": + c.RemainingArgs() // eats args + // if filepath is provided in the Corefile use it to authenticate the dns client + case "credentials": + if c.NextArg() { + opt = option.WithCredentialsFile(c.Val()) + } else { + return c.ArgErr() + } + case "fallthrough": + fall.SetZonesFromArgs(c.RemainingArgs()) + default: + return c.Errf("unknown property '%s'", c.Val()) + } + } + + ctx := context.Background() + client, err := f(ctx, opt) + if err != nil { + return err + } + + h, err := New(ctx, client, keys, up) + if err != nil { + return c.Errf("failed to create Cloud DNS plugin: %v", err) + } + h.Fall = fall + + if err := h.Run(ctx); err != nil { + return c.Errf("failed to initialize Cloud DNS plugin: %v", err) + } + + 
dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
+			h.Next = next
+			return h
+		})
+	}
+
+	return nil
+}
diff --git a/plugin/clouddns/setup_test.go b/plugin/clouddns/setup_test.go
new file mode 100644
index 00000000000..6d2997298b4
--- /dev/null
+++ b/plugin/clouddns/setup_test.go
@@ -0,0 +1,48 @@
+package clouddns
+
+import (
+	"context"
+	"testing"
+
+	"github.com/caddyserver/caddy"
+	"google.golang.org/api/option"
+)
+
+func TestSetupCloudDNS(t *testing.T) {
+	f = func(ctx context.Context, opt option.ClientOption) (gcpDNS, error) {
+		return fakeGCPClient{}, nil
+	}
+
+	tests := []struct {
+		body          string
+		expectedError bool
+	}{
+		{`clouddns`, false},
+		{`clouddns :`, true},
+		{`clouddns ::`, true},
+		{`clouddns example.org.:example-project:zone-name`, false},
+		{`clouddns example.org.:example-project:zone-name { }`, false},
+		{`clouddns example.org.:example-project: { }`, true},
+		{`clouddns example.org.:example-project:zone-name { }`, false},
+		{`clouddns example.org.:example-project:zone-name { wat
+}`, true},
+		{`clouddns example.org.:example-project:zone-name {
+    fallthrough
+}`, false},
+		{`clouddns example.org.:example-project:zone-name {
+    credentials
+}`, true},
+		{`clouddns example.org.:example-project:zone-name example.org.:example-project:zone-name {
+	}`, true},
+
+		{`clouddns example.org {
+	}`, true},
+	}
+
+	for _, test := range tests {
+		c := caddy.NewTestController("dns", test.body)
+		if err := setup(c); (err == nil) == test.expectedError {
+			t.Errorf("Unexpected errors: %v", err)
+		}
+	}
+}
diff --git a/plugin/debug/README.md b/plugin/debug/README.md
index fd769843d8b..a6234866d6a 100644
--- a/plugin/debug/README.md
+++ b/plugin/debug/README.md
@@ -45,4 +45,4 @@ Disable the ability to recover from crashes and show debug logging:
 
 ## Also See
 
-https://www.wireshark.org/docs/man-pages/text2pcap.html.
+<https://www.wireshark.org/docs/man-pages/text2pcap.html>.
diff --git a/plugin/debug/debug.go b/plugin/debug/debug.go
index 225e5f42276..91cc6fcf0c3 100644
--- a/plugin/debug/debug.go
+++ b/plugin/debug/debug.go
@@ -7,12 +7,7 @@ import (
 	"github.com/caddyserver/caddy"
 )
 
-func init() {
-	caddy.RegisterPlugin("debug", caddy.Plugin{
-		ServerType: "dns",
-		Action:     setup,
-	})
-}
+func init() { plugin.Register("debug", setup) }
 
 func setup(c *caddy.Controller) error {
 	config := dnsserver.GetConfig(c)
diff --git a/plugin/debug/pcap.go b/plugin/debug/pcap.go
index 0663f6dce5b..493478a409f 100644
--- a/plugin/debug/pcap.go
+++ b/plugin/debug/pcap.go
@@ -9,7 +9,7 @@ import (
 	"github.com/miekg/dns"
 )
 
-// Hexdump converts the dns message m to a hex dump Whireshark can import.
+// Hexdump converts the dns message m to a hex dump Wireshark can import.
 // See https://www.wireshark.org/docs/man-pages/text2pcap.html.
 // This output looks like this:
 //
diff --git a/plugin/deprecated/setup.go b/plugin/deprecated/setup.go
index 36c13bdb639..782f36f042e 100644
--- a/plugin/deprecated/setup.go
+++ b/plugin/deprecated/setup.go
@@ -20,7 +20,7 @@ import (
 )
 
 // removed has the names of the plugins that need to error on startup.
-var removed = []string{"reverse"} +var removed = []string{""} func setup(c *caddy.Controller) error { c.Next() @@ -29,10 +29,7 @@ func setup(c *caddy.Controller) error { } func init() { - for _, plugin := range removed { - caddy.RegisterPlugin(plugin, caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) + for _, plug := range removed { + plugin.Register(plug, setup) } } diff --git a/plugin/dnssec/OWNERS b/plugin/dnssec/OWNERS deleted file mode 100644 index 1bdb8e3d565..00000000000 --- a/plugin/dnssec/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - isolus - - miekg -approvers: - - isolus - - miekg diff --git a/plugin/dnssec/README.md b/plugin/dnssec/README.md index c8e17bf4f73..ef4eb7b5d39 100644 --- a/plugin/dnssec/README.md +++ b/plugin/dnssec/README.md @@ -2,11 +2,11 @@ ## Name -*dnssec* - enable on-the-fly DNSSEC signing of served data. +*dnssec* - enables on-the-fly DNSSEC signing of served data. ## Description -With *dnssec* any reply that doesn't (or can't) do DNSSEC will get signed on the fly. Authenticated +With *dnssec*, any reply that doesn't (or can't) do DNSSEC will get signed on the fly. Authenticated denial of existence is implemented with NSEC black lies. Using ECDSA as an algorithm is preferred as this leads to smaller signatures (compared to RSA). NSEC3 is *not* supported. @@ -51,7 +51,7 @@ used (See [bugs](#bugs)). ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: * `coredns_dnssec_cache_size{server, type}` - total elements in the cache, type is "signature". * `coredns_dnssec_cache_hits_total{server}` - Counter of cache hits. diff --git a/plugin/dnssec/dnskey.go b/plugin/dnssec/dnskey.go index ea769514db3..6ca89802ea5 100644 --- a/plugin/dnssec/dnskey.go +++ b/plugin/dnssec/dnskey.go @@ -9,8 +9,9 @@ import ( "time" "github.com/coredns/coredns/request" - "github.com/miekg/dns" + + "golang.org/x/crypto/ed25519" ) // DNSKEY holds a DNSSEC public and private key used for on-the-fly signing. @@ -55,6 +56,9 @@ func ParseKeyFile(pubFile, privFile string) (*DNSKEY, error) { if s, ok := p.(*ecdsa.PrivateKey); ok { return &DNSKEY{K: dk, D: dk.ToDS(dns.SHA256), s: s, tag: dk.KeyTag()}, nil } + if s, ok := p.(ed25519.PrivateKey); ok { + return &DNSKEY{K: dk, D: dk.ToDS(dns.SHA256), s: s, tag: dk.KeyTag()}, nil + } return &DNSKEY{K: dk, D: dk.ToDS(dns.SHA256), s: nil, tag: 0}, errors.New("no private key found") } @@ -79,12 +83,12 @@ func (d Dnssec) getDNSKEY(state request.Request, zone string, do bool, server st return m } -// Return true iff this is a zone key with the SEP bit unset. This implies a ZSK (rfc4034 2.1.1). +// Return true if, and only if, this is a zone key with the SEP bit unset. This implies a ZSK (rfc4034 2.1.1). func (k DNSKEY) isZSK() bool { return k.K.Flags&(1<<8) == (1<<8) && k.K.Flags&1 == 0 } -// Return true iff this is a zone key with the SEP bit set. This implies a KSK (rfc4034 2.1.1). +// Return true if, and only if, this is a zone key with the SEP bit set. This implies a KSK (rfc4034 2.1.1). 
func (k DNSKEY) isKSK() bool { return k.K.Flags&(1<<8) == (1<<8) && k.K.Flags&1 == 1 } diff --git a/plugin/dnssec/dnssec_test.go b/plugin/dnssec/dnssec_test.go index e60b5ee7ecc..fb8a128def0 100644 --- a/plugin/dnssec/dnssec_test.go +++ b/plugin/dnssec/dnssec_test.go @@ -153,20 +153,6 @@ func testMsgCname() *dns.Msg { } } -func testDelegationMsg() *dns.Msg { - return &dns.Msg{ - Ns: []dns.RR{ - test.NS("miek.nl. 3600 IN NS linode.atoom.net."), - test.NS("miek.nl. 3600 IN NS ns-ext.nlnetlabs.nl."), - test.NS("miek.nl. 3600 IN NS omval.tednet.nl."), - }, - Extra: []dns.RR{ - test.A("omval.tednet.nl. 3600 IN A 185.49.141.42"), - test.AAAA("omval.tednet.nl. 3600 IN AAAA 2a04:b900:0:100::42"), - }, - } -} - func testMsgDname() *dns.Msg { return &dns.Msg{ Answer: []dns.RR{ diff --git a/plugin/dnssec/handler.go b/plugin/dnssec/handler.go index 6153bf33110..a901b746a8f 100644 --- a/plugin/dnssec/handler.go +++ b/plugin/dnssec/handler.go @@ -55,13 +55,6 @@ var ( Help: "The number of elements in the dnssec cache.", }, []string{"server", "type"}) - cacheCapacity = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: plugin.Namespace, - Subsystem: "dnssec", - Name: "cache_capacity", - Help: "The dnssec cache's capacity.", - }, []string{"server", "type"}) - cacheHits = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: plugin.Namespace, Subsystem: "dnssec", diff --git a/plugin/dnssec/setup.go b/plugin/dnssec/setup.go index 54a2390fd19..f410fb8b5a5 100644 --- a/plugin/dnssec/setup.go +++ b/plugin/dnssec/setup.go @@ -17,12 +17,7 @@ import ( var log = clog.NewWithPlugin("dnssec") -func init() { - caddy.RegisterPlugin("dnssec", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("dnssec", setup) } func setup(c *caddy.Controller) error { zones, keys, capacity, splitkeys, err := dnssecParse(c) diff --git a/plugin/dnstap/OWNERS b/plugin/dnstap/OWNERS deleted file mode 100644 index 6f67242973d..00000000000 --- a/plugin/dnstap/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - varyoo - - yongtang -approvers: - - varyoo - - yongtang diff --git a/plugin/dnstap/README.md b/plugin/dnstap/README.md index 81152e7422b..afcd19bf272 100644 --- a/plugin/dnstap/README.md +++ b/plugin/dnstap/README.md @@ -2,11 +2,11 @@ ## Name -*dnstap* - enable logging to dnstap. +*dnstap* - enables logging to dnstap. ## Description -dnstap is a flexible, structured binary log format for DNS software: http://dnstap.info. With this +dnstap is a flexible, structured binary log format for DNS software; see http://dnstap.info. With this plugin you make CoreDNS output dnstap logging. Note that there is an internal buffer, so expect at least 13 requests before the server sends its diff --git a/plugin/dnstap/dnstapio/dnstap_encoder.go b/plugin/dnstap/dnstapio/dnstap_encoder.go index 07dfc841349..65b15f5875c 100644 --- a/plugin/dnstap/dnstapio/dnstap_encoder.go +++ b/plugin/dnstap/dnstapio/dnstap_encoder.go @@ -11,7 +11,6 @@ import ( ) const ( - frameLenSize = 4 protobufSize = 1024 * 1024 ) diff --git a/plugin/dnstap/dnstapio/io.go b/plugin/dnstap/dnstapio/io.go index 65e2e222e35..9a4c26042ef 100644 --- a/plugin/dnstap/dnstapio/io.go +++ b/plugin/dnstap/dnstapio/io.go @@ -70,7 +70,7 @@ func (dio *dnstapIO) newConnect() error { return dio.enc.resetWriter(dio.conn) } -// Connect connects to the dnstop endpoint. +// Connect connects to the dnstap endpoint. 
func (dio *dnstapIO) Connect() { if err := dio.newConnect(); err != nil { log.Error("No connection to dnstap endpoint") diff --git a/plugin/dnstap/dnstapio/io_test.go b/plugin/dnstap/dnstapio/io_test.go index dc84cee7752..4716b4fd473 100644 --- a/plugin/dnstap/dnstapio/io_test.go +++ b/plugin/dnstap/dnstapio/io_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + "github.com/coredns/coredns/plugin/pkg/reuseport" + tap "github.com/dnstap/golang-dnstap" fs "github.com/farsightsec/golang-framestream" ) @@ -55,7 +57,7 @@ func TestTransport(t *testing.T) { for _, param := range transport { // Start TCP listener - l, err := net.Listen(param[0], param[1]) + l, err := reuseport.Listen(param[0], param[1]) if err != nil { t.Fatalf("Cannot start listener: %s", err) } @@ -82,7 +84,7 @@ func TestRace(t *testing.T) { count := 10 // Start TCP listener - l, err := net.Listen("tcp", endpointTCP) + l, err := reuseport.Listen("tcp", endpointTCP) if err != nil { t.Fatalf("Cannot start listener: %s", err) } @@ -115,7 +117,7 @@ func TestReconnect(t *testing.T) { count := 5 // Start TCP listener - l, err := net.Listen("tcp", endpointTCP) + l, err := reuseport.Listen("tcp", endpointTCP) if err != nil { t.Fatalf("Cannot start listener: %s", err) } @@ -141,7 +143,7 @@ func TestReconnect(t *testing.T) { l.Close() // And start TCP listener again on the same port - l, err = net.Listen("tcp", addr) + l, err = reuseport.Listen("tcp", addr) if err != nil { t.Fatalf("Cannot start listener: %s", err) } diff --git a/plugin/dnstap/handler.go b/plugin/dnstap/handler.go index 1178dad79fc..0dde3a34689 100644 --- a/plugin/dnstap/handler.go +++ b/plugin/dnstap/handler.go @@ -30,13 +30,9 @@ type ( TapMessage(message *tap.Message) Pack() bool } - tapContext struct { - context.Context - Dnstap - } ) -// ContextKey defines the type of key that is used to save data into the context +// ContextKey defines the type of key that is used to save data into the context. 
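+// Using a named string type instead of a plain string keeps these keys from
+// colliding with context keys set by other packages.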
type ContextKey string const ( diff --git a/plugin/dnstap/setup.go b/plugin/dnstap/setup.go index c9595ec73cc..ee481fe1171 100644 --- a/plugin/dnstap/setup.go +++ b/plugin/dnstap/setup.go @@ -6,21 +6,12 @@ import ( "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/dnstap/dnstapio" - clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/coredns/coredns/plugin/pkg/parse" "github.com/caddyserver/caddy" - "github.com/caddyserver/caddy/caddyfile" ) -var log = clog.NewWithPlugin("dnstap") - -func init() { - caddy.RegisterPlugin("dnstap", caddy.Plugin{ - ServerType: "dns", - Action: wrapSetup, - }) -} +func init() { plugin.Register("dnstap", wrapSetup) } func wrapSetup(c *caddy.Controller) error { if err := setup(c); err != nil { @@ -35,7 +26,7 @@ type config struct { full bool } -func parseConfig(d *caddyfile.Dispenser) (c config, err error) { +func parseConfig(d *caddy.Controller) (c config, err error) { d.Next() // directive name if !d.Args(&c.target) { @@ -51,9 +42,7 @@ func parseConfig(d *caddyfile.Dispenser) (c config, err error) { c.target = servers[0] } else { // default to UNIX socket - if strings.HasPrefix(c.target, "unix://") { - c.target = c.target[7:] - } + c.target = strings.TrimPrefix(c.target, "unix://") c.socket = true } @@ -63,7 +52,7 @@ func parseConfig(d *caddyfile.Dispenser) (c config, err error) { } func setup(c *caddy.Controller) error { - conf, err := parseConfig(&c.Dispenser) + conf, err := parseConfig(c) if err != nil { return err } diff --git a/plugin/dnstap/setup_test.go b/plugin/dnstap/setup_test.go index eef941d89c6..5ed8c3b6a09 100644 --- a/plugin/dnstap/setup_test.go +++ b/plugin/dnstap/setup_test.go @@ -21,7 +21,7 @@ func TestConfig(t *testing.T) { } for _, c := range tests { cad := caddy.NewTestController("dns", c.file) - conf, err := parseConfig(&cad.Dispenser) + conf, err := parseConfig(cad) if c.fail { if err == nil { t.Errorf("%s: %s", c.file, err) diff --git a/plugin/done.go b/plugin/done.go index 3f53273dade..c6ff8633b39 100644 --- a/plugin/done.go +++ b/plugin/done.go @@ -10,5 +10,4 @@ func Done(ctx context.Context) bool { default: return false } - return false } diff --git a/plugin/erratic/OWNERS b/plugin/erratic/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/erratic/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/erratic/README.md b/plugin/erratic/README.md index e3bd79ae80a..eeffd97fc19 100644 --- a/plugin/erratic/README.md +++ b/plugin/erratic/README.md @@ -6,15 +6,12 @@ ## Description -*erratic* returns a static response to all queries, but the responses can be delayed, dropped or truncated. -The *erratic* plugin will respond to every A or AAAA query. For any other type it will return -a SERVFAIL response. The reply for A will return 192.0.2.53 (see [RFC -5737](https://tools.ietf.org/html/rfc5737), -for AAAA it returns 2001:DB8::53 (see [RFC 3849](https://tools.ietf.org/html/rfc3849)) and for an -AXFR request it will respond with a small zone transfer. - -*erratic* can also be used in conjunction with the *autopath* plugin. This is mostly to aid in -testing. +*erratic* returns a static response to all queries, but the responses can be delayed, +dropped or truncated. The *erratic* plugin will respond to every A or AAAA query. For +any other type it will return a SERVFAIL response (except AXFR). 
The reply for A will return +192.0.2.53 ([RFC 5737](https://tools.ietf.org/html/rfc5737)), for AAAA it returns 2001:DB8::53 ([RFC +3849](https://tools.ietf.org/html/rfc3849)). For an AXFR request it will respond with a small +zone transfer. ## Syntax @@ -40,17 +37,17 @@ This plugin reports readiness to the ready plugin. ## Examples ~~~ corefile -. { +example.org { erratic { drop 3 } } ~~~ -Or even shorter if the defaults suits you. Note this only drops queries, it does not delay them. +Or even shorter if the defaults suit you. Note this only drops queries, it does not delay them. ~~~ corefile -. { +example.org { erratic } ~~~ @@ -58,7 +55,7 @@ Or even shorter if the defaults suits you. Note this only drops queries, it does Delay 1 in 3 queries for 50ms ~~~ corefile -. { +example.org { erratic { delay 3 50ms } @@ -68,7 +65,7 @@ Delay 1 in 3 queries for 50ms Delay 1 in 3 and truncate 1 in 5. ~~~ corefile -. { +example.org { erratic { delay 3 5ms truncate 5 @@ -79,7 +76,7 @@ Delay 1 in 3 and truncate 1 in 5. Drop every second query. ~~~ corefile -. { +example.org { erratic { drop 2 truncate 2 @@ -89,5 +86,4 @@ Drop every second query. ## Also See -[RFC 3849](https://tools.ietf.org/html/rfc3849) and -[RFC 5737](https://tools.ietf.org/html/rfc5737). +[RFC 3849](https://tools.ietf.org/html/rfc3849) and [RFC 5737](https://tools.ietf.org/html/rfc5737). diff --git a/plugin/erratic/setup.go b/plugin/erratic/setup.go index 98ac4f0c2ab..1cc6e048df7 100644 --- a/plugin/erratic/setup.go +++ b/plugin/erratic/setup.go @@ -11,12 +11,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("erratic", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("erratic", setup) } func setup(c *caddy.Controller) error { e, err := parseErratic(c) diff --git a/plugin/erratic/xfr.go b/plugin/erratic/xfr.go index 8e2b3179493..e1ec77ee99c 100644 --- a/plugin/erratic/xfr.go +++ b/plugin/erratic/xfr.go @@ -2,6 +2,7 @@ package erratic import ( "strings" + "sync" "github.com/coredns/coredns/plugin/test" "github.com/coredns/coredns/request" @@ -46,7 +47,11 @@ func xfr(state request.Request, truncate bool) { close(ch) }() - tr.Out(state.W, state.Req, ch) - state.W.Hijack() - return + wg := new(sync.WaitGroup) + wg.Add(1) + go func() { + tr.Out(state.W, state.Req, ch) + wg.Done() + }() + wg.Wait() } diff --git a/plugin/errors/OWNERS b/plugin/errors/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/errors/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/errors/README.md b/plugin/errors/README.md index 8b1449ea308..61ed582cb5a 100644 --- a/plugin/errors/README.md +++ b/plugin/errors/README.md @@ -2,7 +2,7 @@ ## Name -*errors* - enable error logging. +*errors* - enables error logging. ## Description @@ -38,10 +38,10 @@ For better performance, it's recommended to use the `^` or `$` metacharacters in ## Examples -Use the *whoami* to respond to queries and Log errors to standard output. +Use the *whoami* to respond to queries in the example.org domain and Log errors to standard output. ~~~ corefile -. 
{ +example.org { whoami errors } diff --git a/plugin/errors/setup.go b/plugin/errors/setup.go index a196d220f7c..283f3dd1508 100644 --- a/plugin/errors/setup.go +++ b/plugin/errors/setup.go @@ -10,12 +10,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("errors", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("errors", setup) } func setup(c *caddy.Controller) error { handler, err := errorsParse(c) diff --git a/plugin/etcd/OWNERS b/plugin/etcd/OWNERS deleted file mode 100644 index 256b53f9987..00000000000 --- a/plugin/etcd/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - nitisht -approvers: - - miekg - - nitisht diff --git a/plugin/etcd/README.md b/plugin/etcd/README.md index 2c4afbc1b36..9b7f75f2957 100644 --- a/plugin/etcd/README.md +++ b/plugin/etcd/README.md @@ -2,15 +2,19 @@ ## Name -*etcd* - enables reading zone data from an etcd version 3 instance. +*etcd* - enables SkyDNS service discovery from etcd. ## Description -The data in etcd instance has to be encoded as +The *etcd* plugin implements the (older) SkyDNS service discovery service. It is *not* suitable as +a generic DNS zone data plugin. Only a subset of DNS record types are implemented, and subdomains +and delegations are not handled at all. + +The data in the etcd instance has to be encoded as a [message](https://github.com/skynetservices/skydns/blob/2fcff74cdc9f9a7dd64189a447ef27ac354b725f/msg/service.go#L26) -like [SkyDNS](https://github.com/skynetservices/skydns). It should also work just like SkyDNS. +like [SkyDNS](https://github.com/skynetservices/skydns). It works just like SkyDNS. -The etcd plugin makes extensive use of the forward plugin to forward and query other servers in the +The etcd plugin makes extensive use of the *forward* plugin to forward and query other servers in the network. ## Syntax @@ -19,7 +23,7 @@ network. etcd [ZONES...] ~~~ -* **ZONES** zones etcd should be authoritative for. +* **ZONES** zones *etcd* should be authoritative for. The path will default to `/skydns` the local etcd3 proxy (http://localhost:2379). If no zones are specified the block's zone will be used as the zone. @@ -53,32 +57,39 @@ etcd [ZONES...] { is needed. ## Special Behaviour -CoreDNS etcd plugin leverages directory structure to look for related entries. For example an entry `/skydns/test/skydns/mx` would have entries like `/skydns/test/skydns/mx/a`, `/skydns/test/skydns/mx/b` and so on. Similarly a directory `/skydns/test/skydns/mx1` will have all `mx1` entries. - -With etcd3, support for [hierarchical keys are dropped](https://coreos.com/etcd/docs/latest/learning/api.html). This means there are no directories but only flat keys with prefixes in etcd3. To accommodate lookups, etcdv3 plugin now does a lookup on prefix `/skydns/test/skydns/mx/` to search for entries like `/skydns/test/skydns/mx/a` etc, and if there is nothing found on `/skydns/test/skydns/mx/`, it looks for `/skydns/test/skydns/mx` to find entries like `/skydns/test/skydns/mx1`. - -This causes two lookups from CoreDNS to etcdv3 in certain cases. -## Migration to `etcdv3` API +The *etcd* plugin leverages directory structure to look for related entries. For example +an entry `/skydns/test/skydns/mx` would have entries like `/skydns/test/skydns/mx/a`, +`/skydns/test/skydns/mx/b` and so on. Similarly a directory `/skydns/test/skydns/mx1` will have all +`mx1` entries. 
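+
+For example, the following commands create such related entries (a sketch; the keys and the
+service JSON are illustrative, using the same message format as the examples further down):
+
+~~~
+% etcdctl put /skydns/test/skydns/mx/a '{"host":"10.0.0.1","ttl":60}'
+% etcdctl put /skydns/test/skydns/mx/b '{"host":"10.0.0.2","ttl":60}'
+~~~
+
+A query for `mx.skydns.test` is then answered from both entries found under the
+`/skydns/test/skydns/mx/` prefix.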
-With CoreDNS release `1.2.0`, you'll need to migrate existing CoreDNS related data (if any) on your etcd server to etcdv3 API. This is because with `etcdv3` support, CoreDNS can't see the data stored to an etcd server using `etcdv2` API. +With etcd3, support for [hierarchical keys are +dropped](https://coreos.com/etcd/docs/latest/learning/api.html). This means there are no directories +but only flat keys with prefixes in etcd3. To accommodate lookups, etcdv3 plugin now does a lookup +on prefix `/skydns/test/skydns/mx/` to search for entries like `/skydns/test/skydns/mx/a` etc, and +if there is nothing found on `/skydns/test/skydns/mx/`, it looks for `/skydns/test/skydns/mx` to +find entries like `/skydns/test/skydns/mx1`. -Refer this [blog by CoreOS team](https://coreos.com/blog/migrating-applications-etcd-v3.html) to migrate to etcdv3 API. +This causes two lookups from CoreDNS to etcdv3 in certain cases. ## Examples This is the default SkyDNS setup, with everything specified in full: ~~~ corefile -. { - etcd skydns.local { +skydns.local { + etcd { path /skydns endpoint http://localhost:2379 } prometheus - cache 160 skydns.local + cache loadbalance +} + +. { forward . 8.8.8.8:53 8.8.4.4:53 + cache } ~~~ @@ -86,12 +97,16 @@ Or a setup where we use `/etc/resolv.conf` as the basis for the proxy and the up when resolving external pointing CNAMEs. ~~~ corefile -. { - etcd skydns.local { +skydns.local { + etcd { path /skydns } - cache 160 skydns.local + cache +} + +. { forward . /etc/resolv.conf + cache } ~~~ @@ -102,10 +117,16 @@ etcd skydns.local { endpoint http://localhost:2379 http://localhost:4001 ... ~~~ +Before getting started with these examples, please setup `etcdctl` (with `etcdv3` API) as explained +[here](https://coreos.com/etcd/docs/latest/dev-guide/interacting_v3.html). This will help you to put +sample keys in your etcd server. -Before getting started with these examples, please setup `etcdctl` (with `etcdv3` API) as explained [here](https://coreos.com/etcd/docs/latest/dev-guide/interacting_v3.html). This will help you to put sample keys in your etcd server. - -If you prefer, you can use `curl` to populate the `etcd` server, but with `curl` the endpoint URL depends on the version of `etcd`. For instance, `etcd v3.2` or before uses only [CLIENT-URL]/v3alpha/* while `etcd v3.5` or later uses [CLIENT-URL]/v3/* . Also, Key and Value must be base64 encoded in the JSON payload. With `etcdctl` these details are automatically taken care off. You can check [this document](https://github.com/coreos/etcd/blob/master/Documentation/dev-guide/api_grpc_gateway.md#notes) for details. +If you prefer, you can use `curl` to populate the `etcd` server, but with `curl` the +endpoint URL depends on the version of `etcd`. For instance, `etcd v3.2` or before uses only +[CLIENT-URL]/v3alpha/* while `etcd v3.5` or later uses [CLIENT-URL]/v3/* . Also, Key and Value must +be base64 encoded in the JSON payload. With `etcdctl` these details are automatically taken care +of. You can check [this document](https://github.com/coreos/etcd/blob/master/Documentation/dev-guide/api_grpc_gateway.md#notes) +for details. ### Reverse zones @@ -134,7 +155,9 @@ reverse.skydns.local. ### Zone name as A record -The zone name itself can be used as A record. This behavior can be achieved by writing special entries to the ETCD path of your zone. If your zone is named `skydns.local` for example, you can create an `A` record for this zone as follows: +The zone name itself can be used as an `A` record. 
This behavior can be achieved by writing special +entries to the ETCD path of your zone. If your zone is named `skydns.local` for example, you can +create an `A` record for this zone as follows: ~~~ % etcdctl put /skydns/local/skydns/ '{"host":"1.1.1.1","ttl":60}' diff --git a/plugin/etcd/etcd.go b/plugin/etcd/etcd.go index 5c8f9f11089..305e8e492ee 100644 --- a/plugin/etcd/etcd.go +++ b/plugin/etcd/etcd.go @@ -15,9 +15,9 @@ import ( "github.com/coredns/coredns/request" "github.com/coredns/coredns/plugin/pkg/upstream" - etcdcv3 "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/mvcc/mvccpb" "github.com/miekg/dns" + etcdcv3 "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/mvcc/mvccpb" ) const ( @@ -83,7 +83,7 @@ func (e *Etcd) Records(ctx context.Context, state request.Request, exact bool) ( func (e *Etcd) get(ctx context.Context, path string, recursive bool) (*etcdcv3.GetResponse, error) { ctx, cancel := context.WithTimeout(ctx, etcdTimeout) defer cancel() - if recursive == true { + if recursive { if !strings.HasSuffix(path, "/") { path = path + "/" } diff --git a/plugin/etcd/msg/path.go b/plugin/etcd/msg/path.go index c90798035ef..bfa4588632e 100644 --- a/plugin/etcd/msg/path.go +++ b/plugin/etcd/msg/path.go @@ -29,7 +29,7 @@ func Domain(s string) string { return dnsutil.Join(l[1 : len(l)-1]...) } -// PathWithWildcard ascts as Path, but if a name contains wildcards (* or any), the name will be +// PathWithWildcard acts as Path, but if a name contains wildcards (* or any), the name will be // chopped of before the (first) wildcard, and we do a higher level search and // later find the matching names. So service.*.skydns.local, will look for all // services under skydns.local and will later check for names that match diff --git a/plugin/etcd/msg/service_test.go b/plugin/etcd/msg/service_test.go index 24bea47f632..f334aa5cee1 100644 --- a/plugin/etcd/msg/service_test.go +++ b/plugin/etcd/msg/service_test.go @@ -123,11 +123,3 @@ func TestGroup(t *testing.T) { t.Fatalf("Failure to group seventh set: %v", sx) } } - -func BenchmarkNewSRV(b *testing.B) { - s := &Service{Host: "www,example.org", Port: 8080} - for n := 0; n < b.N; n++ { - srv := s.NewSRV("www.example.org.", 16) - srv = srv - } -} diff --git a/plugin/etcd/setup.go b/plugin/etcd/setup.go index 010833bbc86..a95549f3255 100644 --- a/plugin/etcd/setup.go +++ b/plugin/etcd/setup.go @@ -5,22 +5,14 @@ import ( "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" - clog "github.com/coredns/coredns/plugin/pkg/log" mwtls "github.com/coredns/coredns/plugin/pkg/tls" "github.com/coredns/coredns/plugin/pkg/upstream" - etcdcv3 "github.com/coreos/etcd/clientv3" "github.com/caddyserver/caddy" + etcdcv3 "go.etcd.io/etcd/clientv3" ) -var log = clog.NewWithPlugin("etcd") - -func init() { - caddy.RegisterPlugin("etcd", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("etcd", setup) } func setup(c *caddy.Controller) error { e, err := etcdParse(c) diff --git a/plugin/etcd/setup_test.go b/plugin/etcd/setup_test.go index 20379628b01..8a84d0fd105 100644 --- a/plugin/etcd/setup_test.go +++ b/plugin/etcd/setup_test.go @@ -105,12 +105,12 @@ func TestSetupEtcd(t *testing.T) { if !test.shouldErr { if test.username != "" { if etcd.Client.Username != test.username { - t.Errorf("Etcd username not correctly set for input %s. 
Excpeted: '%+v', actual: '%+v'", test.input, test.username, etcd.Client.Username) + t.Errorf("Etcd username not correctly set for input %s. Expected: '%+v', actual: '%+v'", test.input, test.username, etcd.Client.Username) } } if test.password != "" { if etcd.Client.Password != test.password { - t.Errorf("Etcd password not correctly set for input %s. Excpeted: '%+v', actual: '%+v'", test.input, test.password, etcd.Client.Password) + t.Errorf("Etcd password not correctly set for input %s. Expected: '%+v', actual: '%+v'", test.input, test.password, etcd.Client.Password) } } } diff --git a/plugin/federation/OWNERS b/plugin/federation/OWNERS deleted file mode 100644 index 187c629c943..00000000000 --- a/plugin/federation/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - chrisohaver - - miekg -approvers: - - chrisohaver - - miekg diff --git a/plugin/federation/README.md b/plugin/federation/README.md deleted file mode 100644 index 96ba213ed05..00000000000 --- a/plugin/federation/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# federation - -## Name - -*federation* - enables federated queries to be resolved via the kubernetes plugin. - -## Description - -Enabling this plugin allows -[Federated](https://kubernetes.io/docs/tasks/federation/federation-service-discovery/) queries to be -resolved via the kubernetes plugin. - -Enabling *federation* without also having *kubernetes* is a noop. - -## Syntax - -~~~ -federation [ZONES...] { - NAME DOMAIN -} -~~~ - -* Each **NAME** and **DOMAIN** defines federation membership. One entry for each. A duplicate - **NAME** will silently overwrite any previous value. - -## Examples - -Here we handle all service requests in the `prod` and `stage` federations. - -~~~ -. { - kubernetes cluster.local - federation cluster.local { - prod prod.feddomain.com - staging staging.feddomain.com - } - forward . 192.168.1.12 -} -~~~ diff --git a/plugin/federation/federation.go b/plugin/federation/federation.go deleted file mode 100644 index 7fe2a6f6285..00000000000 --- a/plugin/federation/federation.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Package federation implements kubernetes federation. It checks if the qname matches -a possible federation. If this is the case and the captured answer is an NXDOMAIN, -federation is performed. If this is not the case the original answer is returned. - -The federation label is always the 2nd to last once the zone is chopped of. For -instance "nginx.mynamespace.myfederation.svc.example.com" has "myfederation" as -the federation label. For federation to work we do a normal k8s lookup -*without* that label, if that comes back with NXDOMAIN or NODATA(??) we create -a federation record and return that. - -Federation is only useful in conjunction with the kubernetes plugin, without it is a noop. -*/ -package federation - -import ( - "context" - - "github.com/coredns/coredns/plugin" - "github.com/coredns/coredns/plugin/etcd/msg" - "github.com/coredns/coredns/plugin/pkg/dnsutil" - "github.com/coredns/coredns/plugin/pkg/nonwriter" - "github.com/coredns/coredns/plugin/pkg/upstream" - "github.com/coredns/coredns/request" - - "github.com/miekg/dns" -) - -// Federation contains the name to zone mapping used for federation in kubernetes. -type Federation struct { - f map[string]string - zones []string - Upstream *upstream.Upstream - - Next plugin.Handler - Federations Func -} - -// Func needs to be implemented by any plugin that implements -// federation. Right now this is only the kubernetes plugin. 
-type Func func(state request.Request, fname, fzone string) (msg.Service, error) - -// New returns a new federation. -func New() *Federation { - return &Federation{f: make(map[string]string)} -} - -// ServeDNS implements the plugin.Handle interface. -func (f *Federation) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { - if f.Federations == nil { - return plugin.NextOrFailure(f.Name(), f.Next, ctx, w, r) - } - - state := request.Request{W: w, Req: r} - - zone := plugin.Zones(f.zones).Matches(state.Name()) - if zone == "" { - return plugin.NextOrFailure(f.Name(), f.Next, ctx, w, r) - } - - state.Zone = zone - - // Remove the federation label from the qname to see if something exists. - without, label := f.isNameFederation(state.Name(), state.Zone) - if without == "" { - return plugin.NextOrFailure(f.Name(), f.Next, ctx, w, r) - } - - qname := r.Question[0].Name - r.Question[0].Name = without - state.Clear() - - // Start the next plugin, but with a nowriter, capture the result, if NXDOMAIN - // perform federation, otherwise just write the result. - nw := nonwriter.New(w) - ret, err := plugin.NextOrFailure(f.Name(), f.Next, ctx, nw, r) - - if !plugin.ClientWrite(ret) { - // something went wrong - r.Question[0].Name = qname - return ret, err - } - - if m := nw.Msg; m.Rcode != dns.RcodeNameError { - // If positive answer we need to substitute the original qname in the answer. - m.Question[0].Name = qname - for _, a := range m.Answer { - a.Header().Name = qname - } - - w.WriteMsg(m) - - return dns.RcodeSuccess, nil - } - - // Still here, we've seen NXDOMAIN and need to perform federation. - service, err := f.Federations(state, label, f.f[label]) // state references Req which has updated qname - if err != nil { - r.Question[0].Name = qname - return dns.RcodeServerFailure, err - } - - r.Question[0].Name = qname - - m := new(dns.Msg) - m.SetReply(r) - m.Authoritative = true - - m.Answer = []dns.RR{service.NewCNAME(state.QName(), service.Host)} - - if f.Upstream != nil { - aRecord, err := f.Upstream.Lookup(ctx, state, service.Host, state.QType()) - if err == nil && aRecord != nil && len(aRecord.Answer) > 0 { - m.Answer = append(m.Answer, aRecord.Answer...) - } - } - - w.WriteMsg(m) - return dns.RcodeSuccess, nil -} - -// Name implements the plugin.Handle interface. -func (f *Federation) Name() string { return "federation" } - -// IsNameFederation checks the qname to see if it is a potential federation. The federation -// label is always the 2nd to last once the zone is chopped of. For instance -// "nginx.mynamespace.myfederation.svc.example.com" has "myfederation" as the federation label. -// IsNameFederation returns a new qname with the federation label and the label itself or two -// empty strings if there wasn't a hit. -func (f *Federation) isNameFederation(name, zone string) (string, string) { - base, _ := dnsutil.TrimZone(name, zone) - - // TODO(miek): dns.PrevLabel is better for memory, or dns.Split. - labels := dns.SplitDomainName(base) - ll := len(labels) - if ll < 2 { - return "", "" - } - - fed := labels[ll-2] - - if _, ok := f.f[fed]; ok { - without := dnsutil.Join(labels[:ll-2]...) + labels[ll-1] + "." 
+ zone - return without, fed - } - return "", "" -} diff --git a/plugin/federation/federation_test.go b/plugin/federation/federation_test.go deleted file mode 100644 index 64d6272b7f7..00000000000 --- a/plugin/federation/federation_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package federation - -import ( - "context" - "testing" - - "github.com/coredns/coredns/plugin/kubernetes" - "github.com/coredns/coredns/plugin/pkg/dnstest" - "github.com/coredns/coredns/plugin/test" - - "github.com/miekg/dns" -) - -func TestIsNameFederation(t *testing.T) { - tests := []struct { - fed string - qname string - expectedZone string - }{ - {"prod", "nginx.mynamespace.prod.svc.example.com.", "nginx.mynamespace.svc.example.com."}, - {"prod", "nginx.mynamespace.staging.svc.example.com.", ""}, - {"prod", "nginx.mynamespace.example.com.", ""}, - {"prod", "example.com.", ""}, - {"prod", "com.", ""}, - } - - fed := New() - for i, tc := range tests { - fed.f[tc.fed] = "test-name" - if x, _ := fed.isNameFederation(tc.qname, "example.com."); x != tc.expectedZone { - t.Errorf("Test %d, failed to get zone, expected %s, got %s", i, tc.expectedZone, x) - } - } -} - -func TestFederationKubernetes(t *testing.T) { - tests := []test.Case{ - { - // service exists so we return the IP address associated with it. - Qname: "svc1.testns.prod.svc.cluster.local.", Qtype: dns.TypeA, - Rcode: dns.RcodeSuccess, - Answer: []dns.RR{ - test.A("svc1.testns.prod.svc.cluster.local. 303 IN A 10.0.0.1"), - }, - }, - { - // service does not exist, do the federation dance. - Qname: "svc0.testns.prod.svc.cluster.local.", Qtype: dns.TypeA, - Rcode: dns.RcodeSuccess, - Answer: []dns.RR{ - test.CNAME("svc0.testns.prod.svc.cluster.local. 303 IN CNAME svc0.testns.prod.svc.fd-az.fd-r.federal.example."), - }, - }, - } - - k := kubernetes.New([]string{"cluster.local."}) - k.APIConn = &APIConnFederationTest{zone: "fd-az", region: "fd-r"} - - fed := New() - fed.zones = []string{"cluster.local."} - fed.Federations = k.Federations - fed.Next = k - fed.f = map[string]string{ - "prod": "federal.example.", - } - - ctx := context.TODO() - for i, tc := range tests { - m := tc.Msg() - - rec := dnstest.NewRecorder(&test.ResponseWriter{}) - _, err := fed.ServeDNS(ctx, rec, m) - if err != nil { - t.Errorf("Test %d, expected no error, got %v", i, err) - return - } - - resp := rec.Msg - if err := test.SortAndCheck(resp, tc); err != nil { - t.Error(err) - } - } -} - -func TestFederationKubernetesMissingLabels(t *testing.T) { - tests := []test.Case{ - { - // service does not exist, do the federation dance. - Qname: "svc0.testns.prod.svc.cluster.local.", Qtype: dns.TypeA, - Rcode: dns.RcodeSuccess, - Answer: []dns.RR{ - test.CNAME("svc0.testns.prod.svc.cluster.local. 
303 IN CNAME svc0.testns.prod.svc.fd-az.fd-r.federal.example."), - }, - }, - } - - k := kubernetes.New([]string{"cluster.local."}) - k.APIConn = &APIConnFederationTest{zone: "", region: ""} - - fed := New() - fed.zones = []string{"cluster.local."} - fed.Federations = k.Federations - fed.Next = k - fed.f = map[string]string{ - "prod": "federal.example.", - } - - ctx := context.TODO() - for _, tc := range tests { - m := tc.Msg() - - rec := dnstest.NewRecorder(&test.ResponseWriter{}) - _, err := fed.ServeDNS(ctx, rec, m) - if err == nil { - t.Errorf("Expected an error") - return - } - } -} diff --git a/plugin/federation/kubernetes_api_test.go b/plugin/federation/kubernetes_api_test.go deleted file mode 100644 index 35b058b076f..00000000000 --- a/plugin/federation/kubernetes_api_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package federation - -import ( - "github.com/coredns/coredns/plugin/kubernetes" - "github.com/coredns/coredns/plugin/kubernetes/object" - - api "k8s.io/api/core/v1" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type APIConnFederationTest struct { - zone, region string -} - -func (APIConnFederationTest) HasSynced() bool { return true } -func (APIConnFederationTest) Run() { return } -func (APIConnFederationTest) Stop() error { return nil } -func (APIConnFederationTest) SvcIndexReverse(string) []*object.Service { return nil } -func (APIConnFederationTest) EpIndexReverse(string) []*object.Endpoints { return nil } -func (APIConnFederationTest) Modified() int64 { return 0 } - -func (APIConnFederationTest) PodIndex(string) []*object.Pod { - return []*object.Pod{ - {Namespace: "podns", PodIP: "10.240.0.1"}, // Remote IP set in test.ResponseWriter - } -} - -func (APIConnFederationTest) SvcIndex(string) []*object.Service { - svcs := []*object.Service{ - { - Name: "svc1", - Namespace: "testns", - ClusterIP: "10.0.0.1", - Ports: []api.ServicePort{ - {Name: "http", Protocol: "tcp", Port: 80}, - }, - }, - { - Name: "hdls1", - Namespace: "testns", - ClusterIP: api.ClusterIPNone, - }, - { - Name: "external", - Namespace: "testns", - ExternalName: "ext.interwebs.test", - Ports: []api.ServicePort{ - {Name: "http", Protocol: "tcp", Port: 80}, - }, - }, - } - return svcs -} - -func (APIConnFederationTest) ServiceList() []*object.Service { - svcs := []*object.Service{ - { - Name: "svc1", - Namespace: "testns", - ClusterIP: "10.0.0.1", - Ports: []api.ServicePort{ - {Name: "http", Protocol: "tcp", Port: 80}, - }, - }, - { - Name: "hdls1", - Namespace: "testns", - ClusterIP: api.ClusterIPNone, - }, - { - Name: "external", - Namespace: "testns", - ExternalName: "ext.interwebs.test", - Ports: []api.ServicePort{ - {Name: "http", Protocol: "tcp", Port: 80}, - }, - }, - } - return svcs -} - -func (APIConnFederationTest) EpIndex(string) []*object.Endpoints { - eps := []*object.Endpoints{ - { - Subsets: []object.EndpointSubset{ - { - Addresses: []object.EndpointAddress{ - {IP: "172.0.0.1", Hostname: "ep1a"}, - }, - Ports: []object.EndpointPort{ - {Port: 80, Protocol: "tcp", Name: "http"}, - }, - }, - }, - Name: "svc1", - Namespace: "testns", - }, - } - return eps -} - -func (APIConnFederationTest) EndpointsList() []*object.Endpoints { - eps := []*object.Endpoints{ - { - Subsets: []object.EndpointSubset{ - { - Addresses: []object.EndpointAddress{ - {IP: "172.0.0.1", Hostname: "ep1a"}, - }, - Ports: []object.EndpointPort{ - {Port: 80, Protocol: "tcp", Name: "http"}, - }, - }, - }, - Name: "svc1", - Namespace: "testns", - }, - } - return eps -} - -func (a APIConnFederationTest) 
GetNodeByName(name string) (*api.Node, error) { - return &api.Node{ - ObjectMeta: meta.ObjectMeta{ - Name: "test.node.foo.bar", - Labels: map[string]string{ - kubernetes.LabelRegion: a.region, - kubernetes.LabelZone: a.zone, - }, - }, - }, nil -} - -func (APIConnFederationTest) GetNamespaceByName(name string) (*api.Namespace, error) { - return &api.Namespace{ - ObjectMeta: meta.ObjectMeta{ - Name: name, - }, - }, nil -} diff --git a/plugin/federation/log_test.go b/plugin/federation/log_test.go deleted file mode 100644 index fa94817265d..00000000000 --- a/plugin/federation/log_test.go +++ /dev/null @@ -1,5 +0,0 @@ -package federation - -import clog "github.com/coredns/coredns/plugin/pkg/log" - -func init() { clog.Discard() } diff --git a/plugin/federation/setup.go b/plugin/federation/setup.go deleted file mode 100644 index fde50853d77..00000000000 --- a/plugin/federation/setup.go +++ /dev/null @@ -1,94 +0,0 @@ -package federation - -import ( - "fmt" - - "github.com/coredns/coredns/core/dnsserver" - "github.com/coredns/coredns/plugin" - "github.com/coredns/coredns/plugin/kubernetes" - "github.com/coredns/coredns/plugin/pkg/upstream" - "github.com/miekg/dns" - - "github.com/caddyserver/caddy" -) - -func init() { - caddy.RegisterPlugin("federation", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} - -func setup(c *caddy.Controller) error { - fed, err := federationParse(c) - if err != nil { - return plugin.Error("federation", err) - } - - // Do this in OnStartup, so all plugin has been initialized. - c.OnStartup(func() error { - m := dnsserver.GetConfig(c).Handler("kubernetes") - if m == nil { - return nil - } - if x, ok := m.(*kubernetes.Kubernetes); ok { - fed.Federations = x.Federations - } - return nil - }) - - dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { - fed.Next = next - return fed - }) - - return nil -} - -func federationParse(c *caddy.Controller) (*Federation, error) { - fed := New() - fed.Upstream = upstream.New() - - for c.Next() { - // federation [zones..] 
- zones := c.RemainingArgs() - var origins []string - if len(zones) > 0 { - origins = make([]string, len(zones)) - copy(origins, zones) - } else { - origins = make([]string, len(c.ServerBlockKeys)) - copy(origins, c.ServerBlockKeys) - } - - for c.NextBlock() { - x := c.Val() - switch x { - case "upstream": - // remove soon - c.RemainingArgs() - default: - args := c.RemainingArgs() - if x := len(args); x != 1 { - return fed, fmt.Errorf("need two arguments for federation, got %d", x) - } - - fed.f[x] = dns.Fqdn(args[0]) - } - } - - for i := range origins { - origins[i] = plugin.Host(origins[i]).Normalize() - } - - fed.zones = origins - - if len(fed.f) == 0 { - return fed, fmt.Errorf("at least one name to zone federation expected") - } - - return fed, nil - } - - return fed, nil -} diff --git a/plugin/federation/setup_test.go b/plugin/federation/setup_test.go deleted file mode 100644 index 6aed5cce772..00000000000 --- a/plugin/federation/setup_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package federation - -import ( - "testing" - - "github.com/caddyserver/caddy" -) - -func TestSetup(t *testing.T) { - tests := []struct { - input string - shouldErr bool - expectedLen int - expectedNameZone []string // contains only entry for now - }{ - // ok - {`federation { - prod prod.example.org - }`, false, 1, []string{"prod", "prod.example.org."}}, - - {`federation { - staging staging.example.org - prod prod.example.org - }`, false, 2, []string{"prod", "prod.example.org."}}, - {`federation { - staging staging.example.org - prod prod.example.org - }`, false, 2, []string{"staging", "staging.example.org."}}, - {`federation example.com { - staging staging.example.org - prod prod.example.org - }`, false, 2, []string{"staging", "staging.example.org."}}, - // errors - {`federation { - }`, true, 0, []string{}}, - {`federation { - staging - }`, true, 0, []string{}}, - } - for i, test := range tests { - c := caddy.NewTestController("dns", test.input) - fed, err := federationParse(c) - if test.shouldErr && err == nil { - t.Errorf("Test %v: Expected error but found nil", i) - continue - } else if !test.shouldErr && err != nil { - t.Errorf("Test %v: Expected no error but found error: %v", i, err) - continue - } - if test.shouldErr && err != nil { - continue - } - - if x := len(fed.f); x != test.expectedLen { - t.Errorf("Test %v: Expected map length of %d, got: %d", i, test.expectedLen, x) - } - if x, ok := fed.f[test.expectedNameZone[0]]; !ok { - t.Errorf("Test %v: Expected name for %s, got nothing", i, test.expectedNameZone[0]) - } else { - if x != test.expectedNameZone[1] { - t.Errorf("Test %v: Expected zone: %s, got %s", i, test.expectedNameZone[1], x) - } - } - } -} diff --git a/plugin/file/OWNERS b/plugin/file/OWNERS deleted file mode 100644 index b69d5c74f66..00000000000 --- a/plugin/file/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -reviewers: - - miekg - - yongtang - - stp-ip -approvers: - - miekg - - yongtang diff --git a/plugin/file/README.md b/plugin/file/README.md index b8f9b50d5b5..e80b6b0dccd 100644 --- a/plugin/file/README.md +++ b/plugin/file/README.md @@ -6,7 +6,7 @@ ## Description -The file plugin is used for an "old-style" DNS server. It serves from a preloaded file that exists +The *file* plugin is used for an "old-style" DNS server. It serves from a preloaded file that exists on disk. If the zone file contains signatures (i.e., is signed using DNSSEC), correct DNSSEC answers are returned. Only NSEC is supported! If you use this setup *you* are responsible for re-signing the zonefile. 
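+
+For example, you could re-sign the zone with BIND's `dnssec-signzone` and serve the signed output
+(a sketch; the key file name is a placeholder for your own zone-signing key):
+
+~~~ sh
+$ dnssec-signzone -o example.org -f db.example.org.signed db.example.org Kexample.org.+013+12345
+~~~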
@@ -18,9 +18,9 @@ file DBFILE [ZONES...] ~~~ * **DBFILE** the database file to read and parse. If the path is relative, the path from the *root* - directive will be prepended to it. + plugin will be prepended to it. * **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block - are used. + are used. If you want to round-robin A and AAAA responses look at the *loadbalance* plugin. @@ -55,7 +55,7 @@ example.org { Or use a single zone file for multiple zones: -~~~ +~~~ corefile . { file example.org.signed example.org example.net { transfer to * @@ -67,7 +67,7 @@ Or use a single zone file for multiple zones: Note that if you have a configuration like the following you may run into a problem of the origin not being correctly recognized: -~~~ +~~~ corefile . { file db.example.org } @@ -78,7 +78,7 @@ which, in this case, is the root zone. Any contents of `db.example.org` will the origin set; this may or may not do what you want. It's better to be explicit here and specify the correct origin. This can be done in two ways: -~~~ +~~~ corefile . { file db.example.org example.org } @@ -86,8 +86,12 @@ It's better to be explicit here and specify the correct origin. This can be done Or -~~~ +~~~ corefile example.org { file db.example.org } ~~~ + +## Also See + +See the *loadbalance* plugin if you need simple record shuffling. diff --git a/plugin/file/delete_test.go b/plugin/file/delete_test.go new file mode 100644 index 00000000000..26ee64e3a4a --- /dev/null +++ b/plugin/file/delete_test.go @@ -0,0 +1,65 @@ +package file + +import ( + "bytes" + "fmt" + "testing" + + "github.com/coredns/coredns/plugin/file/tree" + "github.com/coredns/coredns/plugin/test" + + "github.com/miekg/dns" +) + +/* +Create a zone with: + + apex + / + a MX + a A + +Test that: we create the proper tree and that delete +deletes the correct elements +*/ + +var tz = NewZone("example.org.", "db.example.org.") + +type treebuf struct { + *bytes.Buffer +} + +func (t *treebuf) printFunc(e *tree.Elem, rrs map[uint16][]dns.RR) error { + fmt.Fprintf(t.Buffer, "%v\n", rrs) // should be fixed order in new go versions. + return nil +} + +func TestZoneInsertAndDelete(t *testing.T) { + tz.Insert(test.SOA("example.org. IN SOA 1 2 3 4 5")) + + if x := tz.Apex.SOA.Header().Name; x != "example.org." { + t.Errorf("Failed to insert SOA, expected %s, git %s", "example.org.", x) + } + + // Insert two RRs and then remove one. + tz.Insert(test.A("a.example.org. IN A 127.0.0.1")) + tz.Insert(test.MX("a.example.org. IN MX 10 mx.example.org.")) + + tz.Delete(test.MX("a.example.org. IN MX 10 mx.example.org.")) + + tb := treebuf{new(bytes.Buffer)} + + tz.Walk(tb.printFunc) + if tb.String() != "map[1:[a.example.org.\t3600\tIN\tA\t127.0.0.1]]\n" { + t.Errorf("Expected 1 A record in tree, got %s", tb.String()) + } + + tz.Delete(test.A("a.example.org. IN A 127.0.0.1")) + + tb.Reset() + + tz.Walk(tb.printFunc) + if tb.String() != "" { + t.Errorf("Expected no record in tree, got %s", tb.String()) + } +} diff --git a/plugin/file/ent_test.go b/plugin/file/ent_test.go index 355cc65c098..73f50858397 100644 --- a/plugin/file/ent_test.go +++ b/plugin/file/ent_test.go @@ -55,7 +55,6 @@ func TestLookupEnt(t *testing.T) { } } -// fdjfdjkf const dbMiekENTNL = `; File written on Sat Apr 2 16:43:11 2016 ; dnssec_signzone version 9.10.3-P4-Ubuntu miek.nl. 1800 IN SOA linode.atoom.net. miek.miek.nl. 
(
diff --git a/plugin/file/file.go b/plugin/file/file.go
index 2fe4b164476..d8de85cd3c4 100644
--- a/plugin/file/file.go
+++ b/plugin/file/file.go
@@ -58,7 +58,7 @@ func (f File) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (i
 		if ok {
 			z.TransferIn()
 		} else {
-			log.Infof("Notify from %s for %s: no serial increase seen", state.IP(), zone)
+			log.Infof("Notify from %s for %s: no SOA serial increase seen", state.IP(), zone)
 		}
 		if err != nil {
 			log.Warningf("Notify from %s for %s: failed primary check: %s", state.IP(), zone, err)
@@ -69,7 +69,10 @@ func (f File) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (i
 		return dns.RcodeSuccess, nil
 	}
 
-	if z.Expired != nil && *z.Expired {
+	z.RLock()
+	exp := z.Expired
+	z.RUnlock()
+	if exp {
 		log.Errorf("Zone %s is expired", zone)
 		return dns.RcodeServerFailure, nil
 	}
@@ -112,14 +115,13 @@ type serialErr struct {
 }
 
 func (s *serialErr) Error() string {
-	return fmt.Sprintf("%s for origin %s in file %s, with serial %d", s.err, s.origin, s.zone, s.serial)
+	return fmt.Sprintf("%s for origin %s in file %s, with %d SOA serial", s.err, s.origin, s.zone, s.serial)
 }
 
 // Parse parses the zone in filename and returns a new Zone or an error.
 // If serial >= 0 it will reload the zone, if the SOA hasn't changed
 // it returns an error indicating nothing was read.
 func Parse(f io.Reader, origin, fileName string, serial int64) (*Zone, error) {
-
 	zp := dns.NewZoneParser(f, dns.Fqdn(origin), fileName)
 	zp.SetIncludeAllowed(true)
 	z := NewZone(origin, fileName)
@@ -129,12 +131,15 @@ func Parse(f io.Reader, origin, fileName string, serial int64) (*Zone, error) {
 			return nil, err
 		}
 
-		if !seenSOA && serial >= 0 {
+		if !seenSOA {
 			if s, ok := rr.(*dns.SOA); ok {
-				if s.Serial == uint32(serial) { // same serial
+				seenSOA = true
+
+				// -1 is a valid serial if we failed to load the file on startup.
+
+				if serial >= 0 && s.Serial == uint32(serial) { // same serial
 					return nil, &serialErr{err: "no change in SOA serial", origin: origin, zone: fileName, serial: serial}
 				}
-				seenSOA = true
 			}
 		}
@@ -143,7 +148,7 @@
 		}
 	}
 	if !seenSOA {
-		return nil, fmt.Errorf("file %q has no SOA record", fileName)
+		return nil, fmt.Errorf("file %q has no SOA record for origin %s", fileName, origin)
 	}
 
 	return z, nil
diff --git a/plugin/file/fuzz.go b/plugin/file/fuzz.go
index 84f5c1853a2..e693f58bfba 100644
--- a/plugin/file/fuzz.go
+++ b/plugin/file/fuzz.go
@@ -1,4 +1,4 @@
-// +build fuzz
+// +build gofuzz
 
 package file
diff --git a/plugin/file/lookup.go b/plugin/file/lookup.go
index 3a72a616332..3d8d899dfe6 100644
--- a/plugin/file/lookup.go
+++ b/plugin/file/lookup.go
@@ -3,6 +3,7 @@ package file
 import (
 	"context"
 
+	"github.com/coredns/coredns/plugin/file/rrutil"
 	"github.com/coredns/coredns/plugin/file/tree"
 	"github.com/coredns/coredns/request"
 
@@ -32,31 +33,23 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string)
 	qtype := state.QType()
 	do := state.Do()
 
-	if 0 < z.ReloadInterval {
-		z.reloadMu.RLock()
-	}
-	defer func() {
-		if 0 < z.ReloadInterval {
-			z.reloadMu.RUnlock()
-		}
-	}()
-
 	// If z is a secondary zone we might not have transferred it, meaning we have
 	// all zone context setup, except the actual record. This means (for one thing) the apex
 	// is empty and we don't have a SOA record.
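+	// Note on the locking below: the read lock is held only long enough to
+	// snapshot the Apex and Tree into locals; the rest of the lookup answers
+	// from those locals, so a concurrent Reload or TransferIn can swap
+	// z.Apex and z.Tree without racing this query.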
- z.apexMu.RLock() - soa := z.Apex.SOA - z.apexMu.RUnlock() - if soa == nil { + z.RLock() + ap := z.Apex + tr := z.Tree + z.RUnlock() + if ap.SOA == nil { return nil, nil, nil, ServerFailure } if qtype == dns.TypeSOA { - return z.soa(do), z.ns(do), nil, Success + return ap.soa(do), ap.ns(do), nil, Success } if qtype == dns.TypeNS && qname == z.origin { - nsrrs := z.ns(do) - glue := z.Glue(nsrrs, do) + nsrrs := ap.ns(do) + glue := tr.Glue(nsrrs, do) // technically this isn't glue return nsrrs, nil, glue, Success } @@ -76,7 +69,7 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // // Main for-loop handles delegation and finding or not finding the qname. // If found we check if it is a CNAME/DNAME and do CNAME processing - // We also check if we have type and do a nodata resposne. + // We also check if we have type and do a nodata response. // // If not found, we check the potential wildcard, and use that for further processing. // If not found and no wildcard we will process this as an NXDOMAIN response. @@ -87,14 +80,14 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) break } - elem, found = z.Tree.Search(parts) + elem, found = tr.Search(parts) if !found { // Apex will always be found, when we are here we can search for a wildcard // and save the result of that search. So when nothing match, but we have a // wildcard we should expand the wildcard. wildcard := replaceWithAsteriskLabel(parts) - if wild, found := z.Tree.Search(wildcard); found { + if wild, found := tr.Search(wildcard); found { wildElem = wild } @@ -106,15 +99,15 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) } // If we see DNAME records, we should return those. - if dnamerrs := elem.Types(dns.TypeDNAME); dnamerrs != nil { + if dnamerrs := elem.Type(dns.TypeDNAME); dnamerrs != nil { // Only one DNAME is allowed per name. We just pick the first one to synthesize from. dname := dnamerrs[0] if cname := synthesizeCNAME(state.Name(), dname.(*dns.DNAME)); cname != nil { - answer, ns, extra, rcode := z.additionalProcessing(ctx, state, elem, []dns.RR{cname}) + answer, ns, extra, rcode := z.externalLookup(ctx, state, elem, []dns.RR{cname}) if do { - sigs := elem.Types(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeDNAME) + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, dns.TypeDNAME) dnamerrs = append(dnamerrs, sigs...) } @@ -130,7 +123,7 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) } // If we see NS records, it means the name as been delegated, and we should return the delegation. - if nsrrs := elem.Types(dns.TypeNS); nsrrs != nil { + if nsrrs := elem.Type(dns.TypeNS); nsrrs != nil { // If the query is specifically for DS and the qname matches the delegated name, we should // return the DS in the answer section and leave the rest empty, i.e. just continue the loop @@ -140,9 +133,9 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) continue } - glue := z.Glue(nsrrs, do) + glue := tr.Glue(nsrrs, do) if do { - dss := z.typeFromElem(elem, dns.TypeDS, do) + dss := typeFromElem(elem, dns.TypeDS, do) nsrrs = append(nsrrs, dss...) } @@ -160,33 +153,33 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // Found entire name. 
if found && shot { - if rrs := elem.Types(dns.TypeCNAME); len(rrs) > 0 && qtype != dns.TypeCNAME { - return z.additionalProcessing(ctx, state, elem, rrs) + if rrs := elem.Type(dns.TypeCNAME); len(rrs) > 0 && qtype != dns.TypeCNAME { + return z.externalLookup(ctx, state, elem, rrs) } - rrs := elem.Types(qtype, qname) + rrs := elem.Type(qtype) // NODATA if len(rrs) == 0 { - ret := z.soa(do) + ret := ap.soa(do) if do { - nsec := z.typeFromElem(elem, dns.TypeNSEC, do) + nsec := typeFromElem(elem, dns.TypeNSEC, do) ret = append(ret, nsec...) } return nil, ret, nil, NoData } - // Additional section processing for MX, SRV. Check response and see if any of the names are in baliwick - + // Additional section processing for MX, SRV. Check response and see if any of the names are in bailiwick - // if so add IP addresses to the additional section. - additional := additionalProcessing(z, rrs, do) + additional := z.additionalProcessing(rrs, do) if do { - sigs := elem.Types(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, qtype) + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, qtype) rrs = append(rrs, sigs...) } - return rrs, z.ns(do), additional, Success + return rrs, ap.ns(do), additional, Success } @@ -194,19 +187,19 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // Found wildcard. if wildElem != nil { - auth := z.ns(do) + auth := ap.ns(do) - if rrs := wildElem.Types(dns.TypeCNAME, qname); len(rrs) > 0 { - return z.additionalProcessing(ctx, state, wildElem, rrs) + if rrs := wildElem.TypeForWildcard(dns.TypeCNAME, qname); len(rrs) > 0 { + return z.externalLookup(ctx, state, wildElem, rrs) } - rrs := wildElem.Types(qtype, qname) + rrs := wildElem.TypeForWildcard(qtype, qname) // NODATA response. if len(rrs) == 0 { - ret := z.soa(do) + ret := ap.soa(do) if do { - nsec := z.typeFromElem(wildElem, dns.TypeNSEC, do) + nsec := typeFromElem(wildElem, dns.TypeNSEC, do) ret = append(ret, nsec...) } return nil, ret, nil, Success @@ -214,13 +207,13 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) if do { // An NSEC is needed to say no longer name exists under this wildcard. - if deny, found := z.Tree.Prev(qname); found { - nsec := z.typeFromElem(deny, dns.TypeNSEC, do) + if deny, found := tr.Prev(qname); found { + nsec := typeFromElem(deny, dns.TypeNSEC, do) auth = append(auth, nsec...) } - sigs := wildElem.Types(dns.TypeRRSIG, qname) - sigs = signatureForSubType(sigs, qtype) + sigs := wildElem.TypeForWildcard(dns.TypeRRSIG, qname) + sigs = rrutil.SubTypeSignature(sigs, qtype) rrs = append(rrs, sigs...) } @@ -231,19 +224,19 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // Hacky way to get around empty-non-terminals. If a longer name does exist, but this qname, does not, it // must be an empty-non-terminal. If so, we do the proper NXDOMAIN handling, but set the rcode to be success. - if x, found := z.Tree.Next(qname); found { + if x, found := tr.Next(qname); found { if dns.IsSubDomain(qname, x.Name()) { rcode = Success } } - ret := z.soa(do) + ret := ap.soa(do) if do { - deny, found := z.Tree.Prev(qname) + deny, found := tr.Prev(qname) if !found { goto Out } - nsec := z.typeFromElem(deny, dns.TypeNSEC, do) + nsec := typeFromElem(deny, dns.TypeNSEC, do) ret = append(ret, nsec...) if rcode != NameError { @@ -256,10 +249,10 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) if found { // wildcard denial wildcard := "*." 
+ ce.Name() - if ss, found := z.Tree.Prev(wildcard); found { + if ss, found := tr.Prev(wildcard); found { // Only add this nsec if it is different than the one already added if ss.Name() != deny.Name() { - nsec := z.typeFromElem(ss, dns.TypeNSEC, do) + nsec := typeFromElem(ss, dns.TypeNSEC, do) ret = append(ret, nsec...) } } @@ -270,112 +263,94 @@ Out: return nil, ret, nil, rcode } -// Return type tp from e and add signatures (if they exists) and do is true. -func (z *Zone) typeFromElem(elem *tree.Elem, tp uint16, do bool) []dns.RR { - rrs := elem.Types(tp) +// typeFromElem returns the type tp from e and adds signatures (if they exist) and do is true. +func typeFromElem(elem *tree.Elem, tp uint16, do bool) []dns.RR { + rrs := elem.Type(tp) if do { - sigs := elem.Types(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, tp) - if len(sigs) > 0 { - rrs = append(rrs, sigs...) - } + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, tp) + rrs = append(rrs, sigs...) } return rrs } -func (z *Zone) soa(do bool) []dns.RR { +func (a Apex) soa(do bool) []dns.RR { if do { - ret := append([]dns.RR{z.Apex.SOA}, z.Apex.SIGSOA...) + ret := append([]dns.RR{a.SOA}, a.SIGSOA...) return ret } - return []dns.RR{z.Apex.SOA} + return []dns.RR{a.SOA} } -func (z *Zone) ns(do bool) []dns.RR { +func (a Apex) ns(do bool) []dns.RR { if do { - ret := append(z.Apex.NS, z.Apex.SIGNS...) + ret := append(a.NS, a.SIGNS...) return ret } - return z.Apex.NS + return a.NS } -// aditionalProcessing adds signatures and tries to resolve CNAMEs that point to external names. -func (z *Zone) additionalProcessing(ctx context.Context, state request.Request, elem *tree.Elem, rrs []dns.RR) ([]dns.RR, []dns.RR, []dns.RR, Result) { +// externalLookup adds signatures and tries to resolve CNAMEs that point to external names. +func (z *Zone) externalLookup(ctx context.Context, state request.Request, elem *tree.Elem, rrs []dns.RR) ([]dns.RR, []dns.RR, []dns.RR, Result) { qtype := state.QType() do := state.Do() if do { - sigs := elem.Types(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeCNAME) - if len(sigs) > 0 { - rrs = append(rrs, sigs...) - } + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, dns.TypeCNAME) + rrs = append(rrs, sigs...) } targetName := rrs[0].(*dns.CNAME).Target elem, _ = z.Tree.Search(targetName) if elem == nil { - rrs = append(rrs, z.externalLookup(ctx, state, targetName, qtype)...) - return rrs, z.ns(do), nil, Success + rrs = append(rrs, z.doLookup(ctx, state, targetName, qtype)...) + return rrs, z.Apex.ns(do), nil, Success } i := 0 Redo: - cname := elem.Types(dns.TypeCNAME) + cname := elem.Type(dns.TypeCNAME) if len(cname) > 0 { rrs = append(rrs, cname...) if do { - sigs := elem.Types(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeCNAME) - if len(sigs) > 0 { - rrs = append(rrs, sigs...) - } + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, dns.TypeCNAME) + rrs = append(rrs, sigs...) } targetName := cname[0].(*dns.CNAME).Target elem, _ = z.Tree.Search(targetName) if elem == nil { - rrs = append(rrs, z.externalLookup(ctx, state, targetName, qtype)...) - return rrs, z.ns(do), nil, Success + rrs = append(rrs, z.doLookup(ctx, state, targetName, qtype)...) 
+ return rrs, z.Apex.ns(do), nil, Success } i++ - if i > maxChain { - return rrs, z.ns(do), nil, Success + if i > 8 { + return rrs, z.Apex.ns(do), nil, Success } goto Redo } - targets := cnameForType(elem.All(), qtype) + targets := rrutil.CNAMEForType(elem.All(), qtype) if len(targets) > 0 { rrs = append(rrs, targets...) if do { - sigs := elem.Types(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, qtype) - if len(sigs) > 0 { - rrs = append(rrs, sigs...) - } + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, qtype) + rrs = append(rrs, sigs...) } } - return rrs, z.ns(do), nil, Success + return rrs, z.Apex.ns(do), nil, Success } -func cnameForType(targets []dns.RR, origQtype uint16) []dns.RR { - ret := []dns.RR{} - for _, target := range targets { - if target.Header().Rrtype == origQtype { - ret = append(ret, target) - } - } - return ret -} - -func (z *Zone) externalLookup(ctx context.Context, state request.Request, target string, qtype uint16) []dns.RR { +func (z *Zone) doLookup(ctx context.Context, state request.Request, target string, qtype uint16) []dns.RR { m, e := z.Upstream.Lookup(ctx, state, target, qtype) if e != nil { return nil @@ -386,59 +361,9 @@ func (z *Zone) externalLookup(ctx context.Context, state request.Request, target return m.Answer } -// signatureForSubType range through the signature and return the correct ones for the subtype. -func signatureForSubType(rrs []dns.RR, subtype uint16) []dns.RR { - sigs := []dns.RR{} - for _, sig := range rrs { - if s, ok := sig.(*dns.RRSIG); ok { - if s.TypeCovered == subtype { - sigs = append(sigs, s) - } - } - } - return sigs -} - -// Glue returns any potential glue records for nsrrs. -func (z *Zone) Glue(nsrrs []dns.RR, do bool) []dns.RR { - glue := []dns.RR{} - for _, rr := range nsrrs { - if ns, ok := rr.(*dns.NS); ok && dns.IsSubDomain(ns.Header().Name, ns.Ns) { - glue = append(glue, z.searchGlue(ns.Ns, do)...) - } - } - return glue -} - -// searchGlue looks up A and AAAA for name. -func (z *Zone) searchGlue(name string, do bool) []dns.RR { - glue := []dns.RR{} - - // A - if elem, found := z.Tree.Search(name); found { - glue = append(glue, elem.Types(dns.TypeA)...) - if do { - sigs := elem.Types(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeA) - glue = append(glue, sigs...) - } - } - - // AAAA - if elem, found := z.Tree.Search(name); found { - glue = append(glue, elem.Types(dns.TypeAAAA)...) - if do { - sigs := elem.Types(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeAAAA) - glue = append(glue, sigs...) - } - } - return glue -} - // additionalProcessing checks the current answer section and retrieves A or AAAA records // (and possible SIGs) to need to be put in the additional section. -func additionalProcessing(z *Zone, answer []dns.RR, do bool) (extra []dns.RR) { +func (z *Zone) additionalProcessing(answer []dns.RR, do bool) (extra []dns.RR) { for _, rr := range answer { name := "" switch x := rr.(type) { @@ -447,7 +372,7 @@ func additionalProcessing(z *Zone, answer []dns.RR, do bool) (extra []dns.RR) { case *dns.MX: name = x.Mx } - if !dns.IsSubDomain(z.origin, name) { + if len(name) == 0 || !dns.IsSubDomain(z.origin, name) { continue } @@ -456,12 +381,12 @@ func additionalProcessing(z *Zone, answer []dns.RR, do bool) (extra []dns.RR) { continue } - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) for _, addr := range []uint16{dns.TypeA, dns.TypeAAAA} { - if a := elem.Types(addr); a != nil { + if a := elem.Type(addr); a != nil { extra = append(extra, a...) 
if do { - sig := signatureForSubType(sigs, addr) + sig := rrutil.SubTypeSignature(sigs, addr) extra = append(extra, sig...) } } @@ -470,5 +395,3 @@ func additionalProcessing(z *Zone, answer []dns.RR, do bool) (extra []dns.RR) { return extra } - -const maxChain = 8 diff --git a/plugin/file/notify.go b/plugin/file/notify.go index ce6a0b09588..83d73ee6f7a 100644 --- a/plugin/file/notify.go +++ b/plugin/file/notify.go @@ -53,10 +53,9 @@ func notify(zone string, to []string) error { } if err := notifyAddr(c, m, t); err != nil { log.Error(err.Error()) - } else { - log.Infof("Sent notify for zone %q to %q", zone, t) } } + log.Infof("Sent notifies for zone %q to %v", zone, to) return nil } diff --git a/plugin/file/reload.go b/plugin/file/reload.go index e73c5b87df9..79db040feeb 100644 --- a/plugin/file/reload.go +++ b/plugin/file/reload.go @@ -5,29 +5,17 @@ import ( "time" ) -// TickTime is clock resolution. By default ticks every second. Handler checks if reloadInterval has been reached on every tick. -var TickTime = 1 * time.Second - // Reload reloads a zone when it is changed on disk. If z.NoReload is true, no reloading will be done. func (z *Zone) Reload() error { if z.ReloadInterval == 0 { return nil } - tick := time.NewTicker(TickTime) + tick := time.NewTicker(z.ReloadInterval) go func() { - for { select { - case <-tick.C: - if z.LastReloaded.Add(z.ReloadInterval).After(time.Now()) { - //reload interval not reached yet - continue - } - //saving timestamp of last attempted reload - z.LastReloaded = time.Now() - zFile := z.File() reader, err := os.Open(zFile) if err != nil { @@ -37,6 +25,7 @@ func (z *Zone) Reload() error { serial := z.SOASerialIfDefined() zone, err := Parse(reader, z.origin, zFile, serial) + reader.Close() if err != nil { if _, ok := err.(*serialErr); !ok { log.Errorf("Parsing zone %q: %v", z.origin, err) @@ -45,12 +34,12 @@ func (z *Zone) Reload() error { } // copy elements we need - z.reloadMu.Lock() + z.Lock() z.Apex = zone.Apex z.Tree = zone.Tree - z.reloadMu.Unlock() + z.Unlock() - log.Infof("Successfully reloaded zone %q in %q with serial %d", z.origin, zFile, z.Apex.SOA.Serial) + log.Infof("Successfully reloaded zone %q in %q with %d SOA serial", z.origin, zFile, z.Apex.SOA.Serial) z.Notify() case <-z.reloadShutdown: @@ -62,11 +51,10 @@ func (z *Zone) Reload() error { return nil } -// SOASerialIfDefined returns the SOA's serial if the zone has a SOA record in the Apex, or -// -1 otherwise. +// SOASerialIfDefined returns the SOA's serial if the zone has a SOA record in the Apex, or -1 otherwise. func (z *Zone) SOASerialIfDefined() int64 { - z.reloadMu.Lock() - defer z.reloadMu.Unlock() + z.RLock() + defer z.RUnlock() if z.Apex.SOA != nil { return int64(z.Apex.SOA.Serial) } diff --git a/plugin/file/reload_test.go b/plugin/file/reload_test.go index 1139b8a4410..f9e544372c2 100644 --- a/plugin/file/reload_test.go +++ b/plugin/file/reload_test.go @@ -29,7 +29,6 @@ func TestZoneReload(t *testing.T) { t.Fatalf("Failed to parse zone: %s", err) } - TickTime = 500 * time.Millisecond z.ReloadInterval = 500 * time.Millisecond z.Reload() time.Sleep(time.Second) @@ -49,8 +48,12 @@ func TestZoneReload(t *testing.T) { t.Fatalf("Failed to lookup, got %d", res) } - if len(z.All()) != 5 { - t.Fatalf("Expected 5 RRs, got %d", len(z.All())) + rrs, err := z.ApexIfDefined() // all apex records. 
+ if err != nil { + t.Fatal(err) + } + if len(rrs) != 5 { + t.Fatalf("Expected 5 RRs, got %d", len(rrs)) } if err := ioutil.WriteFile(fileName, []byte(reloadZone2Test), 0644); err != nil { t.Fatalf("Failed to write new zone data: %s", err) @@ -58,8 +61,12 @@ func TestZoneReload(t *testing.T) { // Could still be racy, but we need to wait a bit for the event to be seen time.Sleep(1 * time.Second) - if len(z.All()) != 3 { - t.Fatalf("Expected 3 RRs, got %d", len(z.All())) + rrs, err = z.ApexIfDefined() + if err != nil { + t.Fatal(err) + } + if len(rrs) != 3 { + t.Fatalf("Expected 3 RRs, got %d", len(rrs)) } } diff --git a/plugin/file/rrutil/util.go b/plugin/file/rrutil/util.go new file mode 100644 index 00000000000..63e447196a6 --- /dev/null +++ b/plugin/file/rrutil/util.go @@ -0,0 +1,29 @@ +// Package rrutil provides function to find certain RRs in slices. +package rrutil + +import "github.com/miekg/dns" + +// SubTypeSignature returns the RRSIG for the subtype. +func SubTypeSignature(rrs []dns.RR, subtype uint16) []dns.RR { + sigs := []dns.RR{} + // there may be multiple keys that have signed this subtype + for _, sig := range rrs { + if s, ok := sig.(*dns.RRSIG); ok { + if s.TypeCovered == subtype { + sigs = append(sigs, s) + } + } + } + return sigs +} + +// CNAMEForType returns the RR that have the qtype from targets. +func CNAMEForType(rrs []dns.RR, qtype uint16) []dns.RR { + ret := []dns.RR{} + for _, target := range rrs { + if target.Header().Rrtype == qtype { + ret = append(ret, target) + } + } + return ret +} diff --git a/plugin/file/secondary.go b/plugin/file/secondary.go index ed94daad6f5..6cb571fe54b 100644 --- a/plugin/file/secondary.go +++ b/plugin/file/secondary.go @@ -51,11 +51,11 @@ Transfer: return Err } - z.apexMu.Lock() + z.Lock() z.Tree = z1.Tree z.Apex = z1.Apex - *z.Expired = false - z.apexMu.Unlock() + z.Expired = false + z.Unlock() log.Infof("Transferred: %s from %s", z.origin, tr) return nil } @@ -129,7 +129,7 @@ Restart: if !retryActive { break } - *z.Expired = true + z.Expired = true case <-retryTicker.C: if !retryActive { diff --git a/plugin/file/secondary_test.go b/plugin/file/secondary_test.go index db98b4f97a6..820c9b9d0e7 100644 --- a/plugin/file/secondary_test.go +++ b/plugin/file/secondary_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/coredns/coredns/plugin/pkg/dnstest" "github.com/coredns/coredns/plugin/test" "github.com/coredns/coredns/request" @@ -71,18 +72,12 @@ const testZone = "secondary.miek.nl." 
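+// The rewritten tests below use dnstest.NewServer, which starts a DNS server
+// driven by the given handler and exposes its listen address as s.Addr. A
+// sketch of the pattern that replaces the manual dns.HandleFunc/test.TCPServer
+// setup:
+//
+//	s := dnstest.NewServer(soa.Handler)
+//	defer s.Close()
+//	z.TransferFrom = []string{s.Addr}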
func TestShouldTransfer(t *testing.T) { soa := soa{250} - dns.HandleFunc(testZone, soa.Handler) - defer dns.HandleRemove(testZone) - - s, addrstr, err := test.TCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() + s := dnstest.NewServer(soa.Handler) + defer s.Close() z := NewZone("testzone", "test") z.origin = testZone - z.TransferFrom = []string{addrstr} + z.TransferFrom = []string{s.Addr} // when we have a nil SOA (initial state) should, err := z.shouldTransfer() @@ -115,22 +110,14 @@ func TestShouldTransfer(t *testing.T) { func TestTransferIn(t *testing.T) { soa := soa{250} - dns.HandleFunc(testZone, soa.Handler) - defer dns.HandleRemove(testZone) - - s, addrstr, err := test.TCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() + s := dnstest.NewServer(soa.Handler) + defer s.Close() z := new(Zone) - z.Expired = new(bool) z.origin = testZone - z.TransferFrom = []string{addrstr} + z.TransferFrom = []string{s.Addr} - err = z.TransferIn() - if err != nil { + if err := z.TransferIn(); err != nil { t.Fatalf("Unable to run TransferIn: %v", err) } if z.Apex.SOA.String() != fmt.Sprintf("%s 3600 IN SOA bla. bla. 250 0 0 0 0", testZone) { @@ -140,7 +127,6 @@ func TestTransferIn(t *testing.T) { func TestIsNotify(t *testing.T) { z := new(Zone) - z.Expired = new(bool) z.origin = testZone state := newRequest(testZone, dns.TypeSOA) // need to set opcode diff --git a/plugin/file/setup.go b/plugin/file/setup.go index 38ba796217b..44ecf2ca1e9 100644 --- a/plugin/file/setup.go +++ b/plugin/file/setup.go @@ -13,12 +13,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("file", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("file", setup) } func setup(c *caddy.Controller) error { zones, err := fileParse(c) @@ -57,6 +52,9 @@ func fileParse(c *caddy.Controller) (Zones, error) { config := dnsserver.GetConfig(c) + var openErr error + reload := 1 * time.Minute + for c.Next() { // file db.file [zones...] if !c.NextArg() { @@ -77,23 +75,24 @@ func fileParse(c *caddy.Controller) (Zones, error) { reader, err := os.Open(fileName) if err != nil { - // bail out - return Zones{}, err + openErr = err } for i := range origins { origins[i] = plugin.Host(origins[i]).Normalize() - zone, err := Parse(reader, origins[i], fileName, 0) - if err == nil { - z[origins[i]] = zone - } else { - return Zones{}, err + z[origins[i]] = NewZone(origins[i], fileName) + if openErr == nil { + reader.Seek(0, 0) + zone, err := Parse(reader, origins[i], fileName, 0) + if err == nil { + z[origins[i]] = zone + } else { + return Zones{}, err + } } names = append(names, origins[i]) } - reload := 1 * time.Minute - upstr := upstream.New() t := []string{} var e error @@ -124,10 +123,22 @@ func fileParse(c *caddy.Controller) (Zones, error) { if t != nil { z[origin].TransferTo = append(z[origin].TransferTo, t...) 
} - z[origin].ReloadInterval = reload - z[origin].Upstream = upstr } } } + + for origin := range z { + z[origin].ReloadInterval = reload + z[origin].Upstream = upstream.New() + } + + if openErr != nil { + if reload == 0 { + // reload hasn't been set, make this a fatal error + return Zones{}, plugin.Error("file", openErr) + } + log.Warningf("Failed to open %q: trying again in %s", openErr, reload) + + } return Zones{Z: z, Names: names}, nil } diff --git a/plugin/file/setup_test.go b/plugin/file/setup_test.go index f6252759bfb..6a1d5e7906b 100644 --- a/plugin/file/setup_test.go +++ b/plugin/file/setup_test.go @@ -2,6 +2,7 @@ package file import ( "testing" + "time" "github.com/coredns/coredns/plugin/test" @@ -90,3 +91,35 @@ func TestFileParse(t *testing.T) { } } } + +func TestParseReload(t *testing.T) { + name, rm, err := test.TempFile(".", dbMiekNL) + if err != nil { + t.Fatal(err) + } + defer rm() + + tests := []struct { + input string + reload time.Duration + }{ + { + `file ` + name + ` example.org.`, + 1 * time.Minute, + }, + { + `file ` + name + ` example.org. { + reload 5s + }`, + 5 * time.Second, + }, + } + + for i, test := range tests { + c := caddy.NewTestController("dns", test.input) + z, _ := fileParse(c) + if x := z.Z["example.org."].ReloadInterval; x != test.reload { + t.Errorf("Test %d expected reload to be %s, but got %s", i, test.reload, x) + } + } +} diff --git a/plugin/file/tree/all.go b/plugin/file/tree/all.go index fd806365f38..e1fc5b392c3 100644 --- a/plugin/file/tree/all.go +++ b/plugin/file/tree/all.go @@ -1,6 +1,6 @@ package tree -// All traverses tree and returns all elements +// All traverses tree and returns all elements. func (t *Tree) All() []*Elem { if t.Root == nil { return nil @@ -19,30 +19,3 @@ func (n *Node) all(found []*Elem) []*Elem { } return found } - -// Do performs fn on all values stored in the tree. A boolean is returned indicating whether the -// Do traversal was interrupted by an Operation returning true. If fn alters stored values' sort -// relationships, future tree operation behaviors are undefined. -func (t *Tree) Do(fn func(e *Elem) bool) bool { - if t.Root == nil { - return false - } - return t.Root.do(fn) -} - -func (n *Node) do(fn func(e *Elem) bool) (done bool) { - if n.Left != nil { - done = n.Left.do(fn) - if done { - return - } - } - done = fn(n.Elem) - if done { - return - } - if n.Right != nil { - done = n.Right.do(fn) - } - return -} diff --git a/plugin/file/tree/auth_walk.go b/plugin/file/tree/auth_walk.go new file mode 100644 index 00000000000..1f436716fdb --- /dev/null +++ b/plugin/file/tree/auth_walk.go @@ -0,0 +1,58 @@ +package tree + +import ( + "github.com/miekg/dns" +) + +// AuthWalk performs fn on all authoritative values stored in the tree in +// in-order depth first. If a non-nil error is returned the AuthWalk was interrupted +// by an fn returning that error. If fn alters stored values' sort +// relationships, future tree operation behaviors are undefined. +// +// The fn function will be called with three arguments: the current element, a map containing all +// the RRs for this element, and a boolean indicating whether this name is considered authoritative.
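// Editorial aside: a sketch of driving AuthWalk from a caller, assuming a populated
// *tree.Tree. countAuth is hypothetical; only the AuthWalk signature documented
// above is from the patch.
package main

import (
	"fmt"

	"github.com/coredns/coredns/plugin/file/tree"
	"github.com/miekg/dns"
)

func countAuth(t *tree.Tree) (n int, err error) {
	err = t.AuthWalk(func(e *tree.Elem, m map[uint16][]dns.RR, auth bool) error {
		if auth {
			n++ // names below a delegation arrive with auth == false
		}
		return nil
	})
	return n, err
}

func main() {
	tr := &tree.Tree{}
	rr, _ := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	tr.Insert(rr)
	n, _ := countAuth(tr)
	fmt.Println(n) // 1
}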
+func (t *Tree) AuthWalk(fn func(*Elem, map[uint16][]dns.RR, bool) error) error { + if t.Root == nil { + return nil + } + return t.Root.authwalk(make(map[string]struct{}), fn) +} + +func (n *Node) authwalk(ns map[string]struct{}, fn func(*Elem, map[uint16][]dns.RR, bool) error) error { + if n.Left != nil { + if err := n.Left.authwalk(ns, fn); err != nil { + return err + } + } + + // Check if the current name is a subdomain of *any* of the delegated names we've seen, if so, skip this name. + // The ordering of the tree and how we walk it guarantees we see parents first. + if n.Elem.Type(dns.TypeNS) != nil { + ns[n.Elem.Name()] = struct{}{} + } + + auth := true + i := 0 + for { + j, end := dns.NextLabel(n.Elem.Name(), i) + if end { + break + } + if _, ok := ns[n.Elem.Name()[j:]]; ok { + auth = false + break + } + i++ + } + + if err := fn(n.Elem, n.Elem.m, auth); err != nil { + return err + } + + if n.Right != nil { + if err := n.Right.authwalk(ns, fn); err != nil { + return err + } + } + return nil +} diff --git a/plugin/file/tree/elem.go b/plugin/file/tree/elem.go index 6317cc91237..c1909649d68 100644 --- a/plugin/file/tree/elem.go +++ b/plugin/file/tree/elem.go @@ -15,20 +15,34 @@ func newElem(rr dns.RR) *Elem { return &e } -// Types returns the RRs with type qtype from e. If qname is given (only the -// first one is used), the RR are copied and the owner is replaced with qname[0]. -func (e *Elem) Types(qtype uint16, qname ...string) []dns.RR { +// Types returns the types of the records in e. The returned list is not sorted. +func (e *Elem) Types() []uint16 { + t := make([]uint16, len(e.m)) + i := 0 + for ty := range e.m { + t[i] = ty + i++ + } + return t +} + +// Type returns the RRs with type qtype from e. +func (e *Elem) Type(qtype uint16) []dns.RR { return e.m[qtype] } + +// TypeForWildcard returns the RRs with type qtype from e. The owner name returned is set to qname. +func (e *Elem) TypeForWildcard(qtype uint16, qname string) []dns.RR { rrs := e.m[qtype] - if rrs != nil && len(qname) > 0 { - copied := make([]dns.RR, len(rrs)) - for i := range rrs { - copied[i] = dns.Copy(rrs[i]) - copied[i].Header().Name = qname[0] - } - return copied + if rrs == nil { + return nil + } + + copied := make([]dns.RR, len(rrs)) + for i := range rrs { + copied[i] = dns.Copy(rrs[i]) + copied[i].Header().Name = qname } - return rrs + return copied } // All returns all RRs from e, regardless of type. @@ -52,13 +66,10 @@ func (e *Elem) Name() string { return "" } -// Empty returns true is e does not contain any RRs, i.e. is an -// empty-non-terminal. -func (e *Elem) Empty() bool { - return len(e.m) == 0 -} +// Empty returns true if e does not contain any RRs, i.e. is an empty-non-terminal. +func (e *Elem) Empty() bool { return len(e.m) == 0 } -// Insert inserts rr into e. If rr is equal to existing rrs this is a noop. +// Insert inserts rr into e. If rr is equal to existing RRs, the RR will be added anyway. func (e *Elem) Insert(rr dns.RR) { t := rr.Header().Rrtype if e.m == nil { @@ -71,66 +82,20 @@ func (e *Elem) Insert(rr dns.RR) { e.m[t] = []dns.RR{rr} return } - for _, er := range rrs { - if equalRdata(er, rr) { - return - } - } rrs = append(rrs, rr) e.m[t] = rrs } -// Delete removes rr from e. When e is empty after the removal the returned bool is true. -func (e *Elem) Delete(rr dns.RR) (empty bool) { +// Delete removes all RRs of type rr.Header().Rrtype from e.
+func (e *Elem) Delete(rr dns.RR) { if e.m == nil { - return true - } - - t := rr.Header().Rrtype - rrs, ok := e.m[t] - if !ok { return } - for i, er := range rrs { - if equalRdata(er, rr) { - rrs = removeFromSlice(rrs, i) - e.m[t] = rrs - empty = len(rrs) == 0 - if empty { - delete(e.m, t) - } - return - } - } - return + t := rr.Header().Rrtype + delete(e.m, t) } // Less is a tree helper function that calls less. func Less(a *Elem, name string) int { return less(name, a.Name()) } - -// Assuming the same type and name this will check if the rdata is equal as well. -func equalRdata(a, b dns.RR) bool { - switch x := a.(type) { - // TODO(miek): more types, i.e. all types. + tests for this. - case *dns.A: - return x.A.Equal(b.(*dns.A).A) - case *dns.AAAA: - return x.AAAA.Equal(b.(*dns.AAAA).AAAA) - case *dns.MX: - if x.Mx == b.(*dns.MX).Mx && x.Preference == b.(*dns.MX).Preference { - return true - } - } - return false -} - -// removeFromSlice removes index i from the slice. -func removeFromSlice(rrs []dns.RR, i int) []dns.RR { - if i >= len(rrs) { - return rrs - } - rrs = append(rrs[:i], rrs[i+1:]...) - return rrs -} diff --git a/plugin/file/tree/glue.go b/plugin/file/tree/glue.go new file mode 100644 index 00000000000..937ae548202 --- /dev/null +++ b/plugin/file/tree/glue.go @@ -0,0 +1,44 @@ +package tree + +import ( + "github.com/coredns/coredns/plugin/file/rrutil" + + "github.com/miekg/dns" +) + +// Glue returns any potential glue records for nsrrs. +func (t *Tree) Glue(nsrrs []dns.RR, do bool) []dns.RR { + glue := []dns.RR{} + for _, rr := range nsrrs { + if ns, ok := rr.(*dns.NS); ok && dns.IsSubDomain(ns.Header().Name, ns.Ns) { + glue = append(glue, t.searchGlue(ns.Ns, do)...) + } + } + return glue +} + +// searchGlue looks up A and AAAA for name. +func (t *Tree) searchGlue(name string, do bool) []dns.RR { + glue := []dns.RR{} + + // A + if elem, found := t.Search(name); found { + glue = append(glue, elem.Type(dns.TypeA)...) + if do { + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, dns.TypeA) + glue = append(glue, sigs...) + } + } + + // AAAA + if elem, found := t.Search(name); found { + glue = append(glue, elem.Type(dns.TypeAAAA)...) + if do { + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, dns.TypeAAAA) + glue = append(glue, sigs...) + } + } + return glue +} diff --git a/plugin/file/tree/tree.go b/plugin/file/tree/tree.go index ed33c09a488..a6caafe163e 100644 --- a/plugin/file/tree/tree.go +++ b/plugin/file/tree/tree.go @@ -275,7 +275,8 @@ func (n *Node) deleteMax() (root *Node, d int) { return } -// Delete removes rr from the tree, is the node turns empty, that node is deleted with DeleteNode. +// Delete removes all RRs of type rr.Header().Rrtype at the name of rr from the tree. If after the deletion the node is empty, the +// entire node is deleted. func (t *Tree) Delete(rr dns.RR) { if t.Root == nil { return @@ -283,14 +284,11 @@ el, _ := t.Search(rr.Header().Name) if el == nil { - t.deleteNode(rr) return } - // Delete from this element. - empty := el.Delete(rr) - if empty { + el.Delete(rr) + if el.Empty() { t.deleteNode(rr) - return } } diff --git a/plugin/file/tree/walk.go b/plugin/file/tree/walk.go new file mode 100644 index 00000000000..e315eb03fa7 --- /dev/null +++ b/plugin/file/tree/walk.go @@ -0,0 +1,33 @@ +package tree + +import "github.com/miekg/dns" + +// Walk performs fn on all values stored in the tree in +// in-order depth first.
If a non-nil error is returned the Walk was interrupted +// by an fn returning that error. If fn alters stored values' sort +// relationships, future tree operation behaviors are undefined. +func (t *Tree) Walk(fn func(*Elem, map[uint16][]dns.RR) error) error { + if t.Root == nil { + return nil + } + return t.Root.walk(fn) +} + +func (n *Node) walk(fn func(*Elem, map[uint16][]dns.RR) error) error { + if n.Left != nil { + if err := n.Left.walk(fn); err != nil { + return err + } + } + + if err := fn(n.Elem, n.Elem.m); err != nil { + return err + } + + if n.Right != nil { + if err := n.Right.walk(fn); err != nil { + return err + } + } + return nil +} diff --git a/plugin/file/xfr.go b/plugin/file/xfr.go index 08f71030aa6..f7192165ba2 100644 --- a/plugin/file/xfr.go +++ b/plugin/file/xfr.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/file/tree" "github.com/coredns/coredns/request" "github.com/miekg/dns" @@ -26,41 +27,92 @@ func (x Xfr) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (in return 0, plugin.Error(x.Name(), fmt.Errorf("xfr called with non transfer type: %d", state.QType())) } - records := x.All() - if len(records) == 0 { + // For IXFR we take the SOA in the IXFR message (if there), compare it to what we have, and then decide to do an + // AXFR or just reply with one SOA message back. + if state.QType() == dns.TypeIXFR { + code, _ := x.ServeIxfr(ctx, w, r) + if plugin.ClientWrite(code) { + return code, nil + } + } + + // get soa and apex + apex, err := x.ApexIfDefined() + if err != nil { return dns.RcodeServerFailure, nil } ch := make(chan *dns.Envelope) tr := new(dns.Transfer) wg := new(sync.WaitGroup) + wg.Add(1) go func() { - wg.Add(1) tr.Out(w, r, ch) wg.Done() }() - j, l := 0, 0 - records = append(records, records[0]) // add closing SOA to the end - log.Infof("Outgoing transfer of %d records of zone %s to %s started", len(records), x.origin, state.IP()) - for i, r := range records { - l += dns.Len(r) - if l > transferLength { - ch <- &dns.Envelope{RR: records[j:i]} - l = 0 - j = i + rrs := []dns.RR{} + l := len(apex) + + ch <- &dns.Envelope{RR: apex} + + x.Walk(func(e *tree.Elem, _ map[uint16][]dns.RR) error { + rrs = append(rrs, e.All()...) + if len(rrs) > 500 { + ch <- &dns.Envelope{RR: rrs} + l += len(rrs) + rrs = []dns.RR{} } + return nil + }) + + if len(rrs) > 0 { + ch <- &dns.Envelope{RR: rrs} + l += len(rrs) + rrs = []dns.RR{} } - if j < len(records) { - ch <- &dns.Envelope{RR: records[j:]} - } + + ch <- &dns.Envelope{RR: []dns.RR{apex[0]}} // closing SOA. + l++ + close(ch) // Even though we close the channel here, we still have wg.Wait() // to wait before we can return and close the connection. + log.Infof("Outgoing transfer of %d records of zone %s to %s done with %d SOA serial", l, x.origin, state.IP(), apex[0].(*dns.SOA).Serial) return dns.RcodeSuccess, nil } // Name implements the plugin.Handler interface. func (x Xfr) Name() string { return "xfr" } -const transferLength = 1000 // Start a new envelop after message reaches this size in bytes. Intentionally small to test multi envelope parsing. +// ServeIxfr checks if we need to serve a simpler IXFR for the incoming message. +// See RFC 1995 Section 3: "... and the authority section containing the SOA record of client's version of the zone." +// and Section 2, paragraph 4 where we only need to echo the SOA record back. +// This function must be called when the qtype is IXFR.
It returns a code for which plugin.ClientWrite(code) == false when it didn't + write anything; in that case we should perform an AXFR instead. +func (x Xfr) ServeIxfr(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + if len(r.Ns) != 1 { + return dns.RcodeServerFailure, nil + } + soa, ok := r.Ns[0].(*dns.SOA) + if !ok { + return dns.RcodeServerFailure, nil + } + + x.RLock() + if x.Apex.SOA == nil { + x.RUnlock() + return dns.RcodeServerFailure, nil + } + serial := x.Apex.SOA.Serial + x.RUnlock() + + if soa.Serial == serial { // Section 2, para 4; echo SOA back. We have the same zone. + m := new(dns.Msg) + m.SetReply(r) + m.Answer = []dns.RR{soa} + w.WriteMsg(m) + return 0, nil + } + return dns.RcodeServerFailure, nil +} diff --git a/plugin/file/xfr_test.go b/plugin/file/xfr_test.go index 69ad68e6405..c02575556b9 100644 --- a/plugin/file/xfr_test.go +++ b/plugin/file/xfr_test.go @@ -3,6 +3,7 @@ package file import ( "fmt" "strings" + "testing" ) func ExampleZone_All() { @@ -32,3 +33,11 @@ func ExampleZone_All() { // xfr_test.go:15: a.miek.nl. 1800 IN A 139.162.196.78 // xfr_test.go:15: a.miek.nl. 1800 IN AAAA 2a01:7e00::f03c:91ff:fef1:6735 } + +func TestAllNewZone(t *testing.T) { + zone := NewZone("example.org.", "stdin") + records := zone.All() + if len(records) != 0 { + t.Errorf("Expected %d records in empty zone, got %d", 0, len(records)) + } +} diff --git a/plugin/file/zone.go b/plugin/file/zone.go index b24cd91e89d..62720abb490 100644 --- a/plugin/file/zone.go +++ b/plugin/file/zone.go @@ -15,25 +15,25 @@ import ( "github.com/miekg/dns" ) -// Zone defines a structure that contains all data related to a DNS zone. +// Zone is a structure that contains all data related to a DNS zone. type Zone struct { origin string origLen int file string *tree.Tree Apex - apexMu sync.RWMutex + Expired bool + + sync.RWMutex TransferTo []string StartupOnce sync.Once TransferFrom []string - Expired *bool ReloadInterval time.Duration - LastReloaded time.Time - reloadMu sync.RWMutex reloadShutdown chan bool - Upstream *upstream.Upstream // Upstream for looking up external names during the resolution process. + + Upstream *upstream.Upstream // Upstream for looking up external names during the resolution process. } // Apex contains the apex records of a zone: SOA, NS and their potential signatures. @@ -46,17 +46,13 @@ type Apex struct { // NewZone returns a new zone. func NewZone(name, file string) *Zone { - z := &Zone{ + return &Zone{ origin: dns.Fqdn(name), origLen: dns.CountLabel(dns.Fqdn(name)), file: filepath.Clean(file), Tree: &tree.Tree{}, - Expired: new(bool), reloadShutdown: make(chan bool), - LastReloaded: time.Now(), } - *z.Expired = false - return z } // Copy copies a zone. @@ -124,21 +120,18 @@ func (z *Zone) Insert(r dns.RR) error { return nil } -// Delete deletes r from z. -func (z *Zone) Delete(r dns.RR) { z.Tree.Delete(r) } - -// File retrieves the file path in a safe way +// File retrieves the file path in a safe way. func (z *Zone) File() string { - z.reloadMu.Lock() - defer z.reloadMu.Unlock() + z.RLock() + defer z.RUnlock() return z.file } -// SetFile updates the file path in a safe way +// SetFile updates the file path in a safe way. func (z *Zone) SetFile(path string) { - z.reloadMu.Lock() + z.Lock() z.file = path - z.reloadMu.Unlock() + z.Unlock() } // TransferAllowed checks if an incoming request for transferring the zone is allowed according to the ACLs.
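// Editorial aside: the wire shape ServeIxfr above expects — an IXFR query carrying
// the client's current SOA in the authority section (RFC 1995). A minimal sketch;
// the names and serial are hypothetical.
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeIXFR)
	soa, _ := dns.NewRR("example.org. 3600 IN SOA ns1.example.org. hostmaster.example.org. 250 7200 3600 1209600 3600")
	m.Ns = []dns.RR{soa} // when the serials match, ServeIxfr simply echoes this SOA back
	fmt.Println(m)
}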
@@ -161,29 +154,27 @@ func (z *Zone) TransferAllowed(state request.Request) bool { return false } -// All returns all records from the zone, the first record will be the SOA record, -// otionally followed by all RRSIG(SOA)s. -func (z *Zone) All() []dns.RR { - if z.ReloadInterval > 0 { - z.reloadMu.RLock() - defer z.reloadMu.RUnlock() +// ApexIfDefined returns the apex records from z. The SOA record is the first record; if it does not exist, an error is returned. +func (z *Zone) ApexIfDefined() ([]dns.RR, error) { + z.RLock() + defer z.RUnlock() + if z.Apex.SOA == nil { + return nil, fmt.Errorf("no SOA") } - records := []dns.RR{} - allNodes := z.Tree.All() - for _, a := range allNodes { - records = append(records, a.All()...) - } + rrs := []dns.RR{z.Apex.SOA} + if len(z.Apex.SIGSOA) > 0 { + rrs = append(rrs, z.Apex.SIGSOA...) + } + if len(z.Apex.NS) > 0 { + rrs = append(rrs, z.Apex.NS...) + } if len(z.Apex.SIGNS) > 0 { - records = append(z.Apex.SIGNS, records...) + rrs = append(rrs, z.Apex.SIGNS...) } - records = append(z.Apex.NS, records...) - if len(z.Apex.SIGSOA) > 0 { - records = append(z.Apex.SIGSOA, records...) - } - return append([]dns.RR{z.Apex.SOA}, records...) + return rrs, nil } // NameFromRight returns the labels from the right, starting with the diff --git a/plugin/forward/OWNERS b/plugin/forward/OWNERS deleted file mode 100644 index 201f579921c..00000000000 --- a/plugin/forward/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -reviewers: - - grobie - - johnbelamaric - - miekg - - rdrozhdzh -approvers: - - grobie - - johnbelamaric - - rdrozhdzh - - miekg diff --git a/plugin/forward/README.md b/plugin/forward/README.md index 94e306ea848..f9c2f482b56 100644 --- a/plugin/forward/README.md +++ b/plugin/forward/README.md @@ -94,19 +94,17 @@ On each endpoint, the timeouts of the communication are set by default and autom ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metric are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: * `coredns_forward_request_duration_seconds{to}` - duration per upstream interaction. * `coredns_forward_request_count_total{to}` - query count per upstream. -* `coredns_forward_response_rcode_total{to, rcode}` - count of RCODEs per upstream. +* `coredns_forward_response_rcode_count_total{to, rcode}` - count of RCODEs per upstream. * `coredns_forward_healthcheck_failure_count_total{to}` - number of failed health checks per upstream. * `coredns_forward_healthcheck_broken_count_total{}` - counter of when all upstreams are unhealthy, and we are randomly (this always uses the `random` policy) spraying to an upstream. -* `coredns_forward_socket_count_total{to}` - number of cached sockets per upstream. -Where `to` is one of the upstream servers (**TO** from the config), `proto` is the protocol used by -the incoming query ("tcp" or "udp"), and family the transport family ("1" for IPv4, and "2" for -IPv6). +Where `to` is one of the upstream servers (**TO** from the config), and `rcode` is the returned RCODE +from the upstream. ## Examples diff --git a/plugin/forward/connect.go b/plugin/forward/connect.go index 8fde2224bf5..9ac1afe1641 100644 --- a/plugin/forward/connect.go +++ b/plugin/forward/connect.go @@ -44,17 +44,17 @@ func (t *Transport) updateDialTimeout(newDialTime time.Duration) { } // Dial dials the address configured in transport, potentially reusing a connection or creating a new one.
-func (t *Transport) Dial(proto string) (*dns.Conn, bool, error) { +func (t *Transport) Dial(proto string) (*persistConn, bool, error) { // If tls has been configured; use it. if t.tlsConfig != nil { proto = "tcp-tls" } t.dial <- proto - c := <-t.ret + pc := <-t.ret - if c != nil { - return c, true, nil + if pc != nil { + return pc, true, nil } reqTime := time.Now() @@ -62,11 +62,11 @@ func (t *Transport) Dial(proto string) (*dns.Conn, bool, error) { if proto == "tcp-tls" { conn, err := dns.DialTimeoutWithTLS("tcp", t.addr, t.tlsConfig, timeout) t.updateDialTimeout(time.Since(reqTime)) - return conn, false, err + return &persistConn{c: conn}, false, err } conn, err := dns.DialTimeout(proto, t.addr, timeout) t.updateDialTimeout(time.Since(reqTime)) - return conn, false, err + return &persistConn{c: conn}, false, err } // Connect selects an upstream, sends the request and waits for a response. @@ -83,20 +83,20 @@ func (p *Proxy) Connect(ctx context.Context, state request.Request, opts options proto = state.Proto() } - conn, cached, err := p.transport.Dial(proto) + pc, cached, err := p.transport.Dial(proto) if err != nil { return nil, err } // Set buffer size correctly for this client. - conn.UDPSize = uint16(state.Size()) - if conn.UDPSize < 512 { - conn.UDPSize = 512 + pc.c.UDPSize = uint16(state.Size()) + if pc.c.UDPSize < 512 { + pc.c.UDPSize = 512 } - conn.SetWriteDeadline(time.Now().Add(maxTimeout)) - if err := conn.WriteMsg(state.Req); err != nil { - conn.Close() // not giving it back + pc.c.SetWriteDeadline(time.Now().Add(maxTimeout)) + if err := pc.c.WriteMsg(state.Req); err != nil { + pc.c.Close() // not giving it back if err == io.EOF && cached { return nil, ErrCachedClosed } @@ -104,11 +104,11 @@ func (p *Proxy) Connect(ctx context.Context, state request.Request, opts options } var ret *dns.Msg - conn.SetReadDeadline(time.Now().Add(readTimeout)) + pc.c.SetReadDeadline(time.Now().Add(readTimeout)) for { - ret, err = conn.ReadMsg() + ret, err = pc.c.ReadMsg() if err != nil { - conn.Close() // not giving it back + pc.c.Close() // not giving it back if err == io.EOF && cached { return nil, ErrCachedClosed } @@ -120,7 +120,7 @@ func (p *Proxy) Connect(ctx context.Context, state request.Request, opts options } } - p.transport.Yield(conn) + p.transport.Yield(pc) rc, ok := dns.RcodeToString[ret.Rcode] if !ok { diff --git a/plugin/forward/forward.go b/plugin/forward/forward.go index da2e175fe35..4e54ef466f3 100644 --- a/plugin/forward/forward.go +++ b/plugin/forward/forward.go @@ -89,7 +89,7 @@ func (f *Forward) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg if fails < len(f.proxies) { continue } - // All upstream proxies are dead, assume healtcheck is completely broken and randomly + // All upstream proxies are dead, assume healthcheck is completely broken and randomly // select an upstream to connect to. r := new(random) proxy = r.List(f.proxies)[0] @@ -109,14 +109,11 @@ func (f *Forward) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg opts := f.opts for { ret, err = proxy.Connect(ctx, state, opts) - if err == nil { - break - } if err == ErrCachedClosed { // Remote side closed conn, can only happen with TCP. continue } // Retry with TCP if truncated and prefer_udp configured. 
- if ret != nil && ret.Truncated && !opts.forceTCP && f.opts.preferUDP { + if ret != nil && ret.Truncated && !opts.forceTCP && opts.preferUDP { opts.forceTCP = true continue } @@ -202,15 +199,6 @@ var ( ErrCachedClosed = errors.New("cached connection was closed by peer") ) -// policy tells forward what policy for selecting upstream it uses. -type policy int - -const ( - randomPolicy policy = iota - roundRobinPolicy - sequentialPolicy -) - // options holds various options that can be set. type options struct { forceTCP bool diff --git a/plugin/forward/fuzz.go b/plugin/forward/fuzz.go new file mode 100644 index 00000000000..0482f63adbf --- /dev/null +++ b/plugin/forward/fuzz.go @@ -0,0 +1,34 @@ +// +build gofuzz + +package forward + +import ( + "github.com/coredns/coredns/plugin/pkg/dnstest" + "github.com/coredns/coredns/plugin/pkg/fuzz" + + "github.com/miekg/dns" +) + +var f *Forward + +// abuse init to set up an environment to test against. This starts another server that will +// reflect responses. +func init() { + f = New() + s := dnstest.NewServer(r{}.reflectHandler) + f.proxies = append(f.proxies, NewProxy(s.Addr, "tcp")) + f.proxies = append(f.proxies, NewProxy(s.Addr, "udp")) +} + +// Fuzz fuzzes forward. +func Fuzz(data []byte) int { + return fuzz.Do(f, data) +} + +type r struct{} + +func (r r) reflectHandler(w dns.ResponseWriter, req *dns.Msg) { + m := new(dns.Msg) + m.SetReply(req) + w.WriteMsg(m) +} diff --git a/plugin/forward/health_test.go b/plugin/forward/health_test.go index 3d06f283559..b1785eeaedc 100644 --- a/plugin/forward/health_test.go +++ b/plugin/forward/health_test.go @@ -29,7 +29,7 @@ func TestHealth(t *testing.T) { p := NewProxy(s.Addr, transport.DNS) f := New() f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) @@ -69,7 +69,7 @@ func TestHealthTimeout(t *testing.T) { p := NewProxy(s.Addr, transport.DNS) f := New() f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) @@ -113,7 +113,7 @@ func TestHealthFailTwice(t *testing.T) { p := NewProxy(s.Addr, transport.DNS) f := New() f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) @@ -137,7 +137,7 @@ func TestHealthMaxFails(t *testing.T) { f := New() f.maxfails = 2 f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) @@ -169,7 +169,7 @@ func TestHealthNoMaxFails(t *testing.T) { f := New() f.maxfails = 0 f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) diff --git a/plugin/forward/metrics.go b/plugin/forward/metrics.go index 0fe470926f4..e120f55fc17 100644 --- a/plugin/forward/metrics.go +++ b/plugin/forward/metrics.go @@ -31,13 +31,13 @@ var ( Namespace: plugin.Namespace, Subsystem: "forward", Name: "healthcheck_failure_count_total", - Help: "Counter of the number of failed healtchecks.", + Help: "Counter of the number of failed healthchecks.", }, []string{"to"}) HealthcheckBrokenCount = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: plugin.Namespace, Subsystem: "forward", Name: "healthcheck_broken_count_total", - Help: "Counter of the number of complete failures of the healtchecks.", + Help: "Counter of the number of complete failures of the healthchecks.", }) SocketGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: plugin.Namespace, diff --git 
a/plugin/forward/persistent.go b/plugin/forward/persistent.go index d1348f94d4c..afa95d343cd 100644 --- a/plugin/forward/persistent.go +++ b/plugin/forward/persistent.go @@ -2,7 +2,6 @@ package forward import ( "crypto/tls" - "net" "sort" "time" @@ -17,42 +16,32 @@ type persistConn struct { // Transport holds the persistent cache. type Transport struct { - avgDialTime int64 // kind of average time of dial time - conns map[string][]*persistConn // Buckets for udp, tcp and tcp-tls. - expire time.Duration // After this duration a connection is expired. + avgDialTime int64 // kind of average time of dial time + conns [typeTotalCount][]*persistConn // Buckets for udp, tcp and tcp-tls. + expire time.Duration // After this duration a connection is expired. addr string tlsConfig *tls.Config dial chan string - yield chan *dns.Conn - ret chan *dns.Conn + yield chan *persistConn + ret chan *persistConn stop chan bool } func newTransport(addr string) *Transport { t := &Transport{ avgDialTime: int64(maxDialTimeout / 2), - conns: make(map[string][]*persistConn), + conns: [typeTotalCount][]*persistConn{}, expire: defaultExpire, addr: addr, dial: make(chan string), - yield: make(chan *dns.Conn), - ret: make(chan *dns.Conn), + yield: make(chan *persistConn), + ret: make(chan *persistConn), stop: make(chan bool), } return t } -// len returns the number of connection, used for metrics. Can only be safely -// used inside connManager() because of data races. -func (t *Transport) len() int { - l := 0 - for _, conns := range t.conns { - l += len(conns) - } - return l -} - // connManager manages the persistent connection cache for UDP and TCP. func (t *Transport) connManager() { ticker := time.NewTicker(t.expire) @@ -60,41 +49,27 @@ Wait: for { select { case proto := <-t.dial: + transtype := stringToTransportType(proto) // take the last used conn - complexity O(1) - if stack := t.conns[proto]; len(stack) > 0 { + if stack := t.conns[transtype]; len(stack) > 0 { pc := stack[len(stack)-1] if time.Since(pc.used) < t.expire { // Found one, remove from pool and return this conn. - t.conns[proto] = stack[:len(stack)-1] - t.ret <- pc.c + t.conns[transtype] = stack[:len(stack)-1] + t.ret <- pc continue Wait } // clear entire cache if the last conn is expired - t.conns[proto] = nil + t.conns[transtype] = nil // now, the connections being passed to closeConns() are not reachable from // transport methods anymore. So, it's safe to close them in a separate goroutine go closeConns(stack) } - SocketGauge.WithLabelValues(t.addr).Set(float64(t.len())) - t.ret <- nil - case conn := <-t.yield: - - SocketGauge.WithLabelValues(t.addr).Set(float64(t.len() + 1)) - - // no proto here, infer from config and conn - if _, ok := conn.Conn.(*net.UDPConn); ok { - t.conns["udp"] = append(t.conns["udp"], &persistConn{conn, time.Now()}) - continue Wait - } - - if t.tlsConfig == nil { - t.conns["tcp"] = append(t.conns["tcp"], &persistConn{conn, time.Now()}) - continue Wait - } - - t.conns["tcp-tls"] = append(t.conns["tcp-tls"], &persistConn{conn, time.Now()}) + case pc := <-t.yield: + transtype := t.transportTypeFromConn(pc) + t.conns[transtype] = append(t.conns[transtype], pc) case <-ticker.C: t.cleanup(false) @@ -117,12 +92,12 @@ func closeConns(conns []*persistConn) { // cleanup removes connections from cache.
func (t *Transport) cleanup(all bool) { staleTime := time.Now().Add(-t.expire) - for proto, stack := range t.conns { + for transtype, stack := range t.conns { if len(stack) == 0 { continue } if all { - t.conns[proto] = nil + t.conns[transtype] = nil // now, the connections being passed to closeConns() are not reachable from // transport methods anymore. So, it's safe to close them in a separate goroutine go closeConns(stack) @@ -136,15 +111,30 @@ func (t *Transport) cleanup(all bool) { good := sort.Search(len(stack), func(i int) bool { return stack[i].used.After(staleTime) }) - t.conns[proto] = stack[good:] + t.conns[transtype] = stack[good:] // now, the connections being passed to closeConns() are not reachable from // transport methods anymore. So, it's safe to close them in a separate goroutine go closeConns(stack[:good]) } } +// It is hard to pin a value to this; the important thing is to not block forever, and losing a cached connection is not terrible. +const yieldTimeout = 25 * time.Millisecond + // Yield returns the connection to the transport for reuse. -func (t *Transport) Yield(c *dns.Conn) { t.yield <- c } +func (t *Transport) Yield(pc *persistConn) { + pc.used = time.Now() // update used time + + // Make this non-blocking, because in the case of a very busy forwarder we will *block* on this yield. This + // blocks the outer go-routine and stuff will just pile up. We time out when the send fails, as returning + // these connections is an optimization anyway. + select { + case t.yield <- pc: + return + case <-time.After(yieldTimeout): + return + } +} // Start starts the transport's connection manager. func (t *Transport) Start() { go t.connManager() } diff --git a/plugin/forward/persistent_test.go b/plugin/forward/persistent_test.go index f1c906076b4..9ea0cdc109d 100644 --- a/plugin/forward/persistent_test.go +++ b/plugin/forward/persistent_test.go @@ -82,54 +82,6 @@ func TestCleanupByTimer(t *testing.T) { tr.Yield(c4) } -func TestPartialCleanup(t *testing.T) { - s := dnstest.NewServer(func(w dns.ResponseWriter, r *dns.Msg) { - ret := new(dns.Msg) - ret.SetReply(r) - w.WriteMsg(ret) - }) - defer s.Close() - - tr := newTransport(s.Addr) - tr.SetExpire(100 * time.Millisecond) - tr.Start() - defer tr.Stop() - - c1, _, _ := tr.Dial("udp") - c2, _, _ := tr.Dial("udp") - c3, _, _ := tr.Dial("udp") - c4, _, _ := tr.Dial("udp") - c5, _, _ := tr.Dial("udp") - - tr.Yield(c1) - time.Sleep(10 * time.Millisecond) - tr.Yield(c2) - time.Sleep(10 * time.Millisecond) - tr.Yield(c3) - time.Sleep(50 * time.Millisecond) - tr.Yield(c4) - time.Sleep(10 * time.Millisecond) - tr.Yield(c5) - time.Sleep(40 * time.Millisecond) - - c6, _, _ := tr.Dial("udp") - if c6 != c5 { - t.Errorf("Expected c6 == c5") - } - c7, _, _ := tr.Dial("udp") - if c7 != c4 { - t.Errorf("Expected c7 == c4") - } - c8, cached, _ := tr.Dial("udp") - if cached { - t.Error("Expected non-cached connection (c8)") - } - - tr.Yield(c6) - tr.Yield(c7) - tr.Yield(c8) -} - func TestCleanupAll(t *testing.T) { s := dnstest.NewServer(func(w dns.ResponseWriter, r *dns.Msg) { ret := new(dns.Msg) ret.SetReply(r) w.WriteMsg(ret) @@ -144,18 +96,14 @@ func TestCleanupAll(t *testing.T) { c2, _ := dns.DialTimeout("udp", tr.addr, maxDialTimeout) c3, _ := dns.DialTimeout("udp", tr.addr, maxDialTimeout) - tr.conns["udp"] = []*persistConn{ - {c1, time.Now()}, - {c2, time.Now()}, - {c3, time.Now()}, - } + tr.conns[typeUdp] = []*persistConn{{c1, time.Now()}, {c2, time.Now()}, {c3, time.Now()}} - if tr.len() != 3 { + if len(tr.conns[typeUdp]) != 3 { t.Error("Expected 3 connections") } tr.cleanup(true)
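// Editorial aside: Transport.Yield's non-blocking hand-off above, reduced to a
// self-contained, runnable sketch; trySend, ch, v and the timeout are placeholders,
// not part of the patch.
package main

import (
	"fmt"
	"time"
)

// trySend hands v to the receiver, but gives up after timeout rather than blocking forever.
func trySend(ch chan<- int, v int, timeout time.Duration) bool {
	select {
	case ch <- v:
		return true
	case <-time.After(timeout):
		return false // receiver busy; dropping v is acceptable since caching is only an optimization
	}
}

func main() {
	ch := make(chan int) // unbuffered and never read, so the send times out
	fmt.Println(trySend(ch, 1, 25*time.Millisecond)) // false
}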
- if tr.len() > 0 { + if len(tr.conns[typeUdp]) > 0 { t.Error("Expected no cached connections") } } diff --git a/plugin/forward/proxy.go b/plugin/forward/proxy.go index bf4d68dca91..60dfa47a610 100644 --- a/plugin/forward/proxy.go +++ b/plugin/forward/proxy.go @@ -12,11 +12,8 @@ import ( // Proxy defines an upstream host. type Proxy struct { fails uint32 + addr string - addr string - - // Connection caching - expire time.Duration transport *Transport // health checking @@ -69,7 +66,7 @@ func (p *Proxy) Down(maxfails uint32) bool { } // stop stops the health checking goroutine. -func (p *Proxy) close() { p.probe.Stop() } +func (p *Proxy) stop() { p.probe.Stop() } func (p *Proxy) finalizer() { p.transport.Stop() } // start starts the proxy's healthchecking. @@ -80,6 +77,5 @@ func (p *Proxy) start(duration time.Duration) { const ( maxTimeout = 2 * time.Second - minTimeout = 200 * time.Millisecond hcInterval = 500 * time.Millisecond ) diff --git a/plugin/forward/proxy_test.go b/plugin/forward/proxy_test.go index 7075e1133ad..b962f561b4e 100644 --- a/plugin/forward/proxy_test.go +++ b/plugin/forward/proxy_test.go @@ -35,7 +35,7 @@ func TestProxyClose(t *testing.T) { go func() { p.Connect(ctx, state, options{}) }() go func() { p.Connect(ctx, state, options{forceTCP: true}) }() - p.close() + p.stop() } } diff --git a/plugin/forward/setup.go b/plugin/forward/setup.go index d084d61180b..d457566931a 100644 --- a/plugin/forward/setup.go +++ b/plugin/forward/setup.go @@ -13,15 +13,9 @@ import ( "github.com/coredns/coredns/plugin/pkg/transport" "github.com/caddyserver/caddy" - "github.com/caddyserver/caddy/caddyfile" ) -func init() { - caddy.RegisterPlugin("forward", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("forward", setup) } func setup(c *caddy.Controller) error { f, err := parseForward(c) @@ -60,14 +54,11 @@ func (f *Forward) OnStartup() (err error) { // OnShutdown stops all configured proxies. func (f *Forward) OnShutdown() error { for _, p := range f.proxies { - p.close() + p.stop() } return nil } -// Close is a synonym for OnShutdown().
-func (f *Forward) Close() { f.OnShutdown() } - func parseForward(c *caddy.Controller) (*Forward, error) { var ( f *Forward @@ -79,7 +70,7 @@ func parseForward(c *caddy.Controller) (*Forward, error) { return nil, plugin.ErrOnce } i++ - f, err = ParseForwardStanza(&c.Dispenser) + f, err = parseStanza(c) if err != nil { return nil, err } @@ -87,8 +78,7 @@ func parseForward(c *caddy.Controller) (*Forward, error) { return f, nil } -// ParseForwardStanza parses one forward stanza -func ParseForwardStanza(c *caddyfile.Dispenser) (*Forward, error) { +func parseStanza(c *caddy.Controller) (*Forward, error) { f := New() if !c.Args(&f.from) { @@ -133,7 +123,7 @@ func ParseForwardStanza(c *caddyfile.Dispenser) (*Forward, error) { return f, nil } -func parseBlock(c *caddyfile.Dispenser, f *Forward) error { +func parseBlock(c *caddy.Controller, f *Forward) error { switch c.Val() { case "except": ignore := c.RemainingArgs() diff --git a/plugin/forward/type.go b/plugin/forward/type.go new file mode 100644 index 00000000000..83ddbc7c5e6 --- /dev/null +++ b/plugin/forward/type.go @@ -0,0 +1,37 @@ +package forward + +import "net" + +type transportType int + +const ( + typeUdp transportType = iota + typeTcp + typeTls + typeTotalCount // keep this last +) + +func stringToTransportType(s string) transportType { + switch s { + case "udp": + return typeUdp + case "tcp": + return typeTcp + case "tcp-tls": + return typeTls + } + + return typeUdp +} + +func (t *Transport) transportTypeFromConn(pc *persistConn) transportType { + if _, ok := pc.c.Conn.(*net.UDPConn); ok { + return typeUdp + } + + if t.tlsConfig == nil { + return typeTcp + } + + return typeTls +} diff --git a/plugin/grpc/OWNERS b/plugin/grpc/OWNERS deleted file mode 100644 index 7b778f5bdb4..00000000000 --- a/plugin/grpc/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - inigohu - - miekg -approvers: - - inigohu - - miekg diff --git a/plugin/grpc/README.md b/plugin/grpc/README.md index 39e05eca761..36314232a3d 100644 --- a/plugin/grpc/README.md +++ b/plugin/grpc/README.md @@ -60,11 +60,11 @@ Also note the TLS config is "global" for the whole grpc proxy if you need a diff ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metric are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: * `coredns_grpc_request_duration_seconds{to}` - duration per upstream interaction. * `coredns_grpc_request_count_total{to}` - query count per upstream. -* `coredns_grpc_response_rcode_total{to, rcode}` - count of RCODEs per upstream. +* `coredns_grpc_response_rcode_count_total{to, rcode}` - count of RCODEs per upstream.
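// Editorial aside on plugin/forward/type.go above: a self-contained, runnable
// sketch of the fixed-array bucket pattern it introduces. The constant set mirrors
// the patch; the []string payload is a stand-in for []*persistConn.
package main

import "fmt"

type transportType int

const (
	typeUDP transportType = iota
	typeTCP
	typeTLS
	typeTotal // keep this last: it doubles as the array length
)

func fromString(s string) transportType {
	switch s {
	case "tcp":
		return typeTCP
	case "tcp-tls":
		return typeTLS
	}
	return typeUDP // "udp" and any unknown proto
}

func main() {
	// An array indexed by transportType replaces a map[string][]…:
	// lookups are allocation-free and a typo'd key cannot silently create a bucket.
	var conns [typeTotal][]string
	conns[fromString("udp")] = append(conns[typeUDP], "conn-1")
	fmt.Println(len(conns[typeUDP]), len(conns[typeTCP])) // 1 0
}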
## Examples diff --git a/plugin/grpc/setup.go b/plugin/grpc/setup.go index aecef9d8359..a234efb37b6 100644 --- a/plugin/grpc/setup.go +++ b/plugin/grpc/setup.go @@ -11,15 +11,9 @@ import ( pkgtls "github.com/coredns/coredns/plugin/pkg/tls" "github.com/caddyserver/caddy" - "github.com/caddyserver/caddy/caddyfile" ) -func init() { - caddy.RegisterPlugin("grpc", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("grpc", setup) } func setup(c *caddy.Controller) error { g, err := parseGRPC(c) @@ -55,7 +49,7 @@ func parseGRPC(c *caddy.Controller) (*GRPC, error) { return nil, plugin.ErrOnce } i++ - g, err = parseGRPCStanza(&c.Dispenser) + g, err = parseStanza(c) if err != nil { return nil, err } @@ -63,7 +57,7 @@ func parseGRPC(c *caddy.Controller) (*GRPC, error) { return g, nil } -func parseGRPCStanza(c *caddyfile.Dispenser) (*GRPC, error) { +func parseStanza(c *caddy.Controller) (*GRPC, error) { g := newGRPC() if !c.Args(&g.from) { @@ -104,7 +98,7 @@ func parseGRPCStanza(c *caddyfile.Dispenser) (*GRPC, error) { return g, nil } -func parseBlock(c *caddyfile.Dispenser, g *GRPC) error { +func parseBlock(c *caddy.Controller, g *GRPC) error { switch c.Val() { case "except": diff --git a/plugin/health/OWNERS b/plugin/health/OWNERS deleted file mode 100644 index d909fd4511c..00000000000 --- a/plugin/health/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - fastest963 - - miekg -approvers: - - fastest963 - - miekg diff --git a/plugin/health/README.md b/plugin/health/README.md index eb86635ad10..d4228d60019 100644 --- a/plugin/health/README.md +++ b/plugin/health/README.md @@ -44,11 +44,11 @@ net { } ~~~ -Doing this is supported but both endponts ":8080" and ":8081" will export the exact same health. +Doing this is supported but both endpoints ":8080" and ":8081" will export the exact same health. ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metric is exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metric is exported: * `coredns_health_request_duration_seconds{}` - duration to process an HTTP query to the local `/health` endpoint. As this is a local operation it should be fast. A (large) increase in this diff --git a/plugin/health/health.go b/plugin/health/health.go index 55ff68407e5..b5b4b95a268 100644 --- a/plugin/health/health.go +++ b/plugin/health/health.go @@ -8,6 +8,7 @@ import ( "time" clog "github.com/coredns/coredns/plugin/pkg/log" + "github.com/coredns/coredns/plugin/pkg/reuseport" ) var log = clog.NewWithPlugin("health") @@ -29,8 +30,7 @@ func (h *health) OnStartup() error { h.Addr = ":8080" } h.stop = make(chan bool) - - ln, err := net.Listen("tcp", h.Addr) + ln, err := reuseport.Listen("tcp", h.Addr) if err != nil { return err } @@ -42,8 +42,7 @@ func (h *health) OnStartup() error { h.mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { // We're always healthy.
w.WriteHeader(http.StatusOK) - io.WriteString(w, "OK") - return + io.WriteString(w, http.StatusText(http.StatusOK)) }) go func() { http.Serve(h.ln, h.mux) }() diff --git a/plugin/health/health_test.go b/plugin/health/health_test.go index 7c7ad43a13b..35ab285feef 100644 --- a/plugin/health/health_test.go +++ b/plugin/health/health_test.go @@ -31,7 +31,7 @@ func TestHealth(t *testing.T) { } response.Body.Close() - if string(content) != "OK" { + if string(content) != http.StatusText(http.StatusOK) { t.Errorf("Invalid response body: expecting 'OK', got '%s'", string(content)) } } diff --git a/plugin/health/overloaded.go b/plugin/health/overloaded.go index 04d6a5f26c5..1c6602b825b 100644 --- a/plugin/health/overloaded.go +++ b/plugin/health/overloaded.go @@ -2,7 +2,6 @@ package health import ( "net/http" - "sync" "time" "github.com/coredns/coredns/plugin" @@ -48,5 +47,3 @@ var ( Help: "Histogram of the time (in seconds) each request took.", }) ) - -var once sync.Once diff --git a/plugin/health/setup.go b/plugin/health/setup.go index 75e35d8e603..1f72cb245dc 100644 --- a/plugin/health/setup.go +++ b/plugin/health/setup.go @@ -11,12 +11,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("health", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("health", setup) } func setup(c *caddy.Controller) error { addr, lame, err := parse(c) diff --git a/plugin/hosts/OWNERS b/plugin/hosts/OWNERS deleted file mode 100644 index ae6484f9d45..00000000000 --- a/plugin/hosts/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - johnbelamaric - - pmoroney -approvers: - - johnbelamaric - - pmoroney diff --git a/plugin/hosts/README.md b/plugin/hosts/README.md index d42714828fd..7cfe8e933c3 100644 --- a/plugin/hosts/README.md +++ b/plugin/hosts/README.md @@ -11,14 +11,19 @@ file that exists on disk. It checks the file for changes and updates the zones a plugin only supports A, AAAA, and PTR records. The hosts plugin can be used with readily available hosts files that block access to advertising servers. -The plugin reloads the content of the hosts file every 5 seconds. Upon reload, CoreDNS will use the new definitions. -Should the file be deleted, any inlined content will continue to be served. When the file is restored, it will then again be used. +The plugin reloads the content of the hosts file every 5 seconds. Upon reload, CoreDNS will use the +new definitions. Should the file be deleted, any inlined content will continue to be served. When +the file is restored, it will then again be used. + +If you want to pass the request to the rest of the plugin chain if there is no match in the *hosts* +plugin, you must specify the `fallthrough` option. This plugin can only be used once per Server Block. ## The hosts file -Commonly the entries are of the form `IP_address canonical_hostname [aliases...]` as explained by the hosts(5) man page. +Commonly the entries are of the form `IP_address canonical_hostname [aliases...]` as explained by +the hosts(5) man page. Examples: @@ -34,7 +39,8 @@ fdfc:a744:27b5:3b0e::1 example.com example ### PTR records -PTR records for reverse lookups are generated automatically by CoreDNS (based on the hosts file entries) and cannot be created manually. +PTR records for reverse lookups are generated automatically by CoreDNS (based on the hosts file +entries) and cannot be created manually. ## Syntax @@ -49,7 +55,7 @@ hosts [FILE [ZONES...]] { ~~~ * **FILE** the hosts file to read and parse. 
If the path is relative the path from the *root* - directive will be prepended to it. Defaults to /etc/hosts if omitted. We scan the file for changes + plugin will be prepended to it. Defaults to /etc/hosts if omitted. We scan the file for changes every 5 seconds. * **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block are used. @@ -57,13 +63,22 @@ hosts [FILE [ZONES...]] { then all of them will be treated as the additional content for hosts file. The specified hosts file path will still be read but entries will be overridden. * `ttl` change the DNS TTL of the records generated (forward and reverse). The default is 3600 seconds (1 hour). -* `reload` change the period between each hostsfile reload. A time of zero seconds disable the feature. Examples of valid durations: "300ms", "1.5h" or "2h45m" are valid duration with units "ns" (nanosecond), "us" (or "µs" for microsecond), "ms" (millisecond), "s" (second), "m" (minute), "h" (hour). +* `reload` change the period between each hostsfile reload. A time of zero seconds disables the + feature. Examples of valid durations: "300ms", "1.5h" or "2h45m". See Go's + [time](https://godoc.org/time) package. * `no_reverse` disable the automatic generation of the `in-addr.arpa` or `ip6.arpa` entries for the hosts * `fallthrough` If zone matches and no record can be generated, pass request to the next plugin. If **[ZONES...]** is omitted, then fallthrough happens for all zones for which the plugin is authoritative. If specific zones are listed (for example `in-addr.arpa` and `ip6.arpa`), then only queries for those zones will be subject to fallthrough. +## Metrics + +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: + +- `coredns_hosts_entries_count{}` - The combined number of entries in hosts and Corefile. +- `coredns_hosts_reload_timestamp_seconds{}` - The timestamp of the last reload of hosts file. + ## Examples Load `/etc/hosts` file. @@ -96,11 +111,12 @@ next plugin if query doesn't match. Load hosts file inlined in Corefile. ~~~ -. { - hosts example.hosts example.org { +example.hosts example.org { + hosts { 10.0.0.1 example.org fallthrough } + whoami } ~~~ diff --git a/plugin/hosts/hosts.go b/plugin/hosts/hosts.go index 8650053c03f..a94c65a7e1e 100644 --- a/plugin/hosts/hosts.go +++ b/plugin/hosts/hosts.go @@ -29,9 +29,9 @@ func (h Hosts) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) ( zone := plugin.Zones(h.Origins).Matches(qname) if zone == "" { - // PTR zones don't need to be specified in Origins - if state.Type() != "PTR" { - // If this doesn't match we need to fall through regardless of h.Fallthrough + // PTR zones don't need to be specified in Origins. + if state.QType() != dns.TypePTR { + // if this doesn't match we need to fall through regardless of h.Fallthrough return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) } } @@ -56,8 +56,10 @@ func (h Hosts) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) ( if h.Fall.Through(qname) { return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) } - if !h.otherRecordsExist(state.QType(), qname) { - return dns.RcodeNameError, nil + // We want to send an NXDOMAIN, but because of /etc/hosts' setup we don't have a SOA, so we return SERVFAIL + // to at least give an answer back that signals we're having problems resolving this.
+ if !h.otherRecordsExist(qname) { + return dns.RcodeServerFailure, nil } } @@ -70,26 +72,14 @@ func (h Hosts) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) ( return dns.RcodeSuccess, nil } -func (h Hosts) otherRecordsExist(qtype uint16, qname string) bool { - switch qtype { - case dns.TypeA: - if len(h.LookupStaticHostV6(qname)) > 0 { - return true - } - case dns.TypeAAAA: - if len(h.LookupStaticHostV4(qname)) > 0 { - return true - } - default: - if len(h.LookupStaticHostV4(qname)) > 0 { - return true - } - if len(h.LookupStaticHostV6(qname)) > 0 { - return true - } +func (h Hosts) otherRecordsExist(qname string) bool { + if len(h.LookupStaticHostV4(qname)) > 0 { + return true + } + if len(h.LookupStaticHostV6(qname)) > 0 { + return true } return false - } // Name implements the plugin.Handler interface. @@ -97,39 +87,36 @@ func (h Hosts) Name() string { return "hosts" } // a takes a slice of net.IPs and returns a slice of A RRs. func a(zone string, ttl uint32, ips []net.IP) []dns.RR { - answers := []dns.RR{} - for _, ip := range ips { + answers := make([]dns.RR, len(ips)) + for i, ip := range ips { r := new(dns.A) - r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypeA, - Class: dns.ClassINET, Ttl: ttl} + r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: ttl} r.A = ip - answers = append(answers, r) + answers[i] = r } return answers } // aaaa takes a slice of net.IPs and returns a slice of AAAA RRs. func aaaa(zone string, ttl uint32, ips []net.IP) []dns.RR { - answers := []dns.RR{} - for _, ip := range ips { + answers := make([]dns.RR, len(ips)) + for i, ip := range ips { r := new(dns.AAAA) - r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, Ttl: ttl} + r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: ttl} r.AAAA = ip - answers = append(answers, r) + answers[i] = r } return answers } // ptr takes a slice of host names and filters out the ones that aren't in Origins, if specified, and returns a slice of PTR RRs.
func (h *Hosts) ptr(zone string, ttl uint32, names []string) []dns.RR { - answers := []dns.RR{} - for _, n := range names { + answers := make([]dns.RR, len(names)) + for i, n := range names { r := new(dns.PTR) - r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypePTR, - Class: dns.ClassINET, Ttl: ttl} + r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: ttl} r.Ptr = dns.Fqdn(n) - answers = append(answers, r) + answers[i] = r } return answers } diff --git a/plugin/hosts/hosts_test.go b/plugin/hosts/hosts_test.go index 975710bb31b..062c998513c 100644 --- a/plugin/hosts/hosts_test.go +++ b/plugin/hosts/hosts_test.go @@ -2,7 +2,6 @@ package hosts import ( "context" - "io" "strings" "testing" @@ -12,20 +11,17 @@ import ( "github.com/miekg/dns" ) -func (h *Hostsfile) parseReader(r io.Reader) { - h.hmap = h.parse(r) -} - func TestLookupA(t *testing.T) { h := Hosts{ Next: test.ErrorHandler(), Hostsfile: &Hostsfile{ Origins: []string{"."}, - hmap: newHostsMap(), + hmap: newMap(), + inline: newMap(), options: newOptions(), }, } - h.parseReader(strings.NewReader(hostsExample)) + h.hmap = h.parse(strings.NewReader(hostsExample)) ctx := context.TODO() diff --git a/plugin/hosts/hostsfile.go b/plugin/hosts/hostsfile.go index 421f8a77cd7..19cb7e7b3f5 100644 --- a/plugin/hosts/hostsfile.go +++ b/plugin/hosts/hostsfile.go @@ -17,9 +17,12 @@ import ( "time" "github.com/coredns/coredns/plugin" + + "github.com/prometheus/client_golang/prometheus" ) -func parseLiteralIP(addr string) net.IP { +// parseIP discards any v6 zone info before calling net.ParseIP. +func parseIP(addr string) net.IP { if i := strings.Index(addr, "%"); i >= 0 { // discard ipv6 zone addr = addr[0:i] @@ -28,10 +31,6 @@ ... return net.ParseIP(addr) } -func absDomainName(b string) string { - return plugin.Name(b).Normalize() -} - type options struct { // automatically generate IP to Hostname PTR entries // for host entries we parse @@ -48,48 +47,40 @@ func newOptions() *options { return &options{ autoReverse: true, ttl: 3600, - reload: durationOf5s, + reload: time.Duration(5 * time.Second), } } -type hostsMap struct { - // Key for the list of literal IP addresses must be a host - // name. It would be part of DNS labels, a FQDN or an absolute - // FQDN. - // For now the key is converted to lower case for convenience. - byNameV4 map[string][]net.IP - byNameV6 map[string][]net.IP +// Map contains the IPv4/IPv6 and reverse mapping. +type Map struct { + // Key for the list of literal IP addresses must be a lowercased FQDN host name. + name4 map[string][]net.IP + name6 map[string][]net.IP // Key for the list of host names must be a literal IP address - // including IPv6 address with zone identifier. + // including IPv6 address without zone identifier. // We don't support old-classful IP address notation. - byAddr map[string][]string + addr map[string][]string } -const ( - durationOf0s = time.Duration(0) - durationOf5s = time.Duration(5 * time.Second) -) - -func newHostsMap() *hostsMap { - return &hostsMap{ - byNameV4: make(map[string][]net.IP), - byNameV6: make(map[string][]net.IP), - byAddr: make(map[string][]string), +func newMap() *Map { + return &Map{ + name4: make(map[string][]net.IP), + name6: make(map[string][]net.IP), + addr: make(map[string][]string), } } -// Len returns the total number of addresses in the hostmap, this includes -// V4/V6 and any reverse addresses.
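// Editorial aside: parseIP above in isolation — net.ParseIP rejects zoned IPv6
// literals, so the zone is stripped first. A runnable sketch; the sample address
// is hypothetical.
package main

import (
	"fmt"
	"net"
	"strings"
)

func parseIP(addr string) net.IP {
	if i := strings.Index(addr, "%"); i >= 0 {
		addr = addr[:i] // discard ipv6 zone, e.g. "%eth0"
	}
	return net.ParseIP(addr)
}

func main() {
	fmt.Println(parseIP("fe80::1%eth0"))     // fe80::1
	fmt.Println(net.ParseIP("fe80::1%eth0")) // <nil>
}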
-func (h *hostsMap) Len() int {
+// Len returns the total number of addresses in the hostmap; this includes V4/V6 and any reverse addresses.
+func (h *Map) Len() int {
 	l := 0
-	for _, v4 := range h.byNameV4 {
+	for _, v4 := range h.name4 {
 		l += len(v4)
 	}
-	for _, v6 := range h.byNameV6 {
+	for _, v6 := range h.name6 {
 		l += len(v6)
 	}
-	for _, a := range h.byAddr {
+	for _, a := range h.addr {
 		l += len(a)
 	}
 	return l
@@ -103,11 +94,10 @@ type Hostsfile struct {
 	Origins []string

 	// hosts maps for lookups
-	hmap *hostsMap
+	hmap *Map

 	// inline saves the hosts file that is inlined in a Corefile.
-	// We need a copy here as we want to use it to initialize the maps for parse.
-	inline *hostsMap
+	inline *Map

 	// path to the hosts file
 	path string
@@ -132,6 +122,7 @@ func (h *Hostsfile) readHosts() {
 	h.RLock()
 	size := h.size
 	h.RUnlock()
+
 	if err == nil && h.mtime.Equal(stat.ModTime()) && size == stat.Size() {
 		return
 	}
@@ -146,6 +137,8 @@ func (h *Hostsfile) readHosts() {
 	h.mtime = stat.ModTime()
 	h.size = stat.Size()

+	hostsEntries.WithLabelValues().Set(float64(h.inline.Len() + h.hmap.Len()))
+	hostsReloadTime.Set(float64(stat.ModTime().UnixNano()) / 1e9)
 	h.Unlock()
 }

@@ -155,12 +148,11 @@ func (h *Hostsfile) initInline(inline []string) {
 	}

 	h.inline = h.parse(strings.NewReader(strings.Join(inline, "\n")))
-	*h.hmap = *h.inline
 }

-// Parse reads the hostsfile and populates the byName and byAddr maps.
-func (h *Hostsfile) parse(r io.Reader) *hostsMap {
-	hmap := newHostsMap()
+// parse reads the hostsfile and populates the name4, name6 and addr maps.
+func (h *Hostsfile) parse(r io.Reader) *Map {
+	hmap := newMap()

 	scanner := bufio.NewScanner(r)
 	for scanner.Scan() {
@@ -173,73 +165,52 @@ func (h *Hostsfile) parse(r io.Reader) *hostsMap {
 		if len(f) < 2 {
 			continue
 		}
-		addr := parseLiteralIP(string(f[0]))
+		addr := parseIP(string(f[0]))
 		if addr == nil {
 			continue
 		}
-		ver := ipVersion(string(f[0]))
+
+		family := 0
+		if addr.To4() != nil {
+			family = 1
+		} else {
+			family = 2
+		}
+
 		for i := 1; i < len(f); i++ {
-			name := absDomainName(string(f[i]))
+			name := plugin.Name(string(f[i])).Normalize()
 			if plugin.Zones(h.Origins).Matches(name) == "" {
 				// name is not in Origins
 				continue
 			}
-			switch ver {
-			case 4:
-				hmap.byNameV4[name] = append(hmap.byNameV4[name], addr)
-			case 6:
-				hmap.byNameV6[name] = append(hmap.byNameV6[name], addr)
+			switch family {
+			case 1:
+				hmap.name4[name] = append(hmap.name4[name], addr)
+			case 2:
+				hmap.name6[name] = append(hmap.name6[name], addr)
 			default:
 				continue
 			}
 			if !h.options.autoReverse {
 				continue
 			}
-			hmap.byAddr[addr.String()] = append(hmap.byAddr[addr.String()], name)
+			hmap.addr[addr.String()] = append(hmap.addr[addr.String()], name)
 		}
 	}

-	for name := range h.hmap.byNameV4 {
-		hmap.byNameV4[name] = append(hmap.byNameV4[name], h.hmap.byNameV4[name]...)
-	}
-	for name := range h.hmap.byNameV4 {
-		hmap.byNameV6[name] = append(hmap.byNameV6[name], h.hmap.byNameV6[name]...)
-	}
-
-	for addr := range h.hmap.byAddr {
-		hmap.byAddr[addr] = append(hmap.byAddr[addr], h.hmap.byAddr[addr]...)
-	}
-
 	return hmap
 }

-// ipVersion returns what IP version was used textually
-// For why the string is parsed end to start,
-// see IPv4-Compatible IPv6 addresses - RFC 4291 section 2.5.5
-func ipVersion(s string) int {
-	for i := len(s) - 1; i >= 0; i-- {
-		switch s[i] {
-		case '.':
-			return 4
-		case ':':
-			return 6
-		}
-	}
-	return 0
-}
-
-// LookupStaticHost looks up the IP addresses for the given host from the hosts file.
-func (h *Hostsfile) lookupStaticHost(hmapByName map[string][]net.IP, host string) []net.IP { - fqhost := absDomainName(host) - +// lookupStaticHost looks up the IP addresses for the given host from the hosts file. +func (h *Hostsfile) lookupStaticHost(m map[string][]net.IP, host string) []net.IP { h.RLock() defer h.RUnlock() - if len(hmapByName) == 0 { + if len(m) == 0 { return nil } - ips, ok := hmapByName[fqhost] + ips, ok := m[host] if !ok { return nil } @@ -250,30 +221,54 @@ func (h *Hostsfile) lookupStaticHost(hmapByName map[string][]net.IP, host string // LookupStaticHostV4 looks up the IPv4 addresses for the given host from the hosts file. func (h *Hostsfile) LookupStaticHostV4(host string) []net.IP { - return h.lookupStaticHost(h.hmap.byNameV4, host) + host = strings.ToLower(host) + ip1 := h.lookupStaticHost(h.hmap.name4, host) + ip2 := h.lookupStaticHost(h.inline.name4, host) + return append(ip1, ip2...) } // LookupStaticHostV6 looks up the IPv6 addresses for the given host from the hosts file. func (h *Hostsfile) LookupStaticHostV6(host string) []net.IP { - return h.lookupStaticHost(h.hmap.byNameV6, host) + host = strings.ToLower(host) + ip1 := h.lookupStaticHost(h.hmap.name6, host) + ip2 := h.lookupStaticHost(h.inline.name6, host) + return append(ip1, ip2...) } // LookupStaticAddr looks up the hosts for the given address from the hosts file. func (h *Hostsfile) LookupStaticAddr(addr string) []string { - h.RLock() - defer h.RUnlock() - addr = parseLiteralIP(addr).String() + addr = parseIP(addr).String() if addr == "" { return nil } - if len(h.hmap.byAddr) == 0 { - return nil - } - hosts, ok := h.hmap.byAddr[addr] - if !ok { + + h.RLock() + defer h.RUnlock() + hosts1 := h.hmap.addr[addr] + hosts2 := h.inline.addr[addr] + + if len(hosts1) == 0 && len(hosts2) == 0 { return nil } - hostsCp := make([]string, len(hosts)) - copy(hostsCp, hosts) + + hostsCp := make([]string, len(hosts1)+len(hosts2)) + copy(hostsCp, hosts1) + copy(hostsCp[len(hosts1):], hosts2) return hostsCp } + +var ( + hostsEntries = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: plugin.Namespace, + Subsystem: "hosts", + Name: "entries_count", + Help: "The combined number of entries in hosts and Corefile.", + }, []string{}) + + hostsReloadTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: plugin.Namespace, + Subsystem: "hosts", + Name: "reload_timestamp_seconds", + Help: "The timestamp of the last reload of hosts file.", + }) +) diff --git a/plugin/hosts/hostsfile_test.go b/plugin/hosts/hostsfile_test.go index db0e63d75d8..626b8918d46 100644 --- a/plugin/hosts/hostsfile_test.go +++ b/plugin/hosts/hostsfile_test.go @@ -9,15 +9,18 @@ import ( "reflect" "strings" "testing" + + "github.com/coredns/coredns/plugin" ) func testHostsfile(file string) *Hostsfile { h := &Hostsfile{ Origins: []string{"."}, - hmap: newHostsMap(), + hmap: newMap(), + inline: newMap(), options: newOptions(), } - h.parseReader(strings.NewReader(file)) + h.hmap = h.parse(strings.NewReader(file)) return h } @@ -74,44 +77,43 @@ var lookupStaticHostTests = []struct { { hosts, []staticHostEntry{ - {"odin", []string{"127.0.0.2", "127.0.0.3"}, []string{"::2"}}, - {"thor", []string{"127.1.1.1"}, []string{}}, - {"ullr", []string{"127.1.1.2"}, []string{}}, - {"ullrhost", []string{"127.1.1.2"}, []string{}}, - {"localhost", []string{}, []string{"fe80::1"}}, + {"odin.", []string{"127.0.0.2", "127.0.0.3"}, []string{"::2"}}, + {"thor.", []string{"127.1.1.1"}, []string{}}, + {"ullr.", []string{"127.1.1.2"}, []string{}}, + {"ullrhost.", 
[]string{"127.1.1.2"}, []string{}}, + {"localhost.", []string{}, []string{"fe80::1"}}, }, }, { singlelinehosts, // see golang.org/issue/6646 []staticHostEntry{ - {"odin", []string{"127.0.0.2"}, []string{}}, + {"odin.", []string{"127.0.0.2"}, []string{}}, }, }, { ipv4hosts, []staticHostEntry{ - {"localhost", []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}, []string{}}, - {"localhost.localdomain", []string{"127.0.0.3"}, []string{}}, + {"localhost.", []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}, []string{}}, + {"localhost.localdomain.", []string{"127.0.0.3"}, []string{}}, }, }, { ipv6hosts, []staticHostEntry{ - {"localhost", []string{}, []string{"::1", "fe80::1", "fe80::2", "fe80::3"}}, - {"localhost.localdomain", []string{}, []string{"fe80::3"}}, + {"localhost.", []string{}, []string{"::1", "fe80::1", "fe80::2", "fe80::3"}}, + {"localhost.localdomain.", []string{}, []string{"fe80::3"}}, }, }, { casehosts, []staticHostEntry{ - {"PreserveMe", []string{"127.0.0.1"}, []string{"::1"}}, - {"PreserveMe.local", []string{"127.0.0.1"}, []string{"::1"}}, + {"PreserveMe.", []string{"127.0.0.1"}, []string{"::1"}}, + {"PreserveMe.local.", []string{"127.0.0.1"}, []string{"::1"}}, }, }, } func TestLookupStaticHost(t *testing.T) { - for _, tt := range lookupStaticHostTests { h := testHostsfile(tt.file) for _, ent := range tt.ents { @@ -121,7 +123,7 @@ func TestLookupStaticHost(t *testing.T) { } func testStaticHost(t *testing.T, ent staticHostEntry, h *Hostsfile) { - ins := []string{ent.in, absDomainName(ent.in), strings.ToLower(ent.in), strings.ToUpper(ent.in)} + ins := []string{ent.in, plugin.Name(ent.in).Normalize(), strings.ToLower(ent.in), strings.ToUpper(ent.in)} for k, in := range ins { addrsV4 := h.LookupStaticHostV4(in) if len(addrsV4) != len(ent.v4) { @@ -156,43 +158,43 @@ var lookupStaticAddrTests = []struct { { hosts, []staticIPEntry{ - {"255.255.255.255", []string{"broadcasthost"}}, - {"127.0.0.2", []string{"odin"}}, - {"127.0.0.3", []string{"odin"}}, - {"::2", []string{"odin"}}, - {"127.1.1.1", []string{"thor"}}, - {"127.1.1.2", []string{"ullr", "ullrhost"}}, - {"fe80::1", []string{"localhost"}}, + {"255.255.255.255", []string{"broadcasthost."}}, + {"127.0.0.2", []string{"odin."}}, + {"127.0.0.3", []string{"odin."}}, + {"::2", []string{"odin."}}, + {"127.1.1.1", []string{"thor."}}, + {"127.1.1.2", []string{"ullr.", "ullrhost."}}, + {"fe80::1", []string{"localhost."}}, }, }, { singlelinehosts, // see golang.org/issue/6646 []staticIPEntry{ - {"127.0.0.2", []string{"odin"}}, + {"127.0.0.2", []string{"odin."}}, }, }, { ipv4hosts, // see golang.org/issue/8996 []staticIPEntry{ - {"127.0.0.1", []string{"localhost"}}, - {"127.0.0.2", []string{"localhost"}}, - {"127.0.0.3", []string{"localhost", "localhost.localdomain"}}, + {"127.0.0.1", []string{"localhost."}}, + {"127.0.0.2", []string{"localhost."}}, + {"127.0.0.3", []string{"localhost.", "localhost.localdomain."}}, }, }, { ipv6hosts, // see golang.org/issue/8996 []staticIPEntry{ - {"::1", []string{"localhost"}}, - {"fe80::1", []string{"localhost"}}, - {"fe80::2", []string{"localhost"}}, - {"fe80::3", []string{"localhost", "localhost.localdomain"}}, + {"::1", []string{"localhost."}}, + {"fe80::1", []string{"localhost."}}, + {"fe80::2", []string{"localhost."}}, + {"fe80::3", []string{"localhost.", "localhost.localdomain."}}, }, }, { casehosts, // see golang.org/issue/12806 []staticIPEntry{ - {"127.0.0.1", []string{"PreserveMe", "PreserveMe.local"}}, - {"::1", []string{"PreserveMe", "PreserveMe.local"}}, + {"127.0.0.1", []string{"PreserveMe.", 
"PreserveMe.local."}}, + {"::1", []string{"PreserveMe.", "PreserveMe.local."}}, }, }, } @@ -209,7 +211,7 @@ func TestLookupStaticAddr(t *testing.T) { func testStaticAddr(t *testing.T, ent staticIPEntry, h *Hostsfile) { hosts := h.LookupStaticAddr(ent.in) for i := range ent.out { - ent.out[i] = absDomainName(ent.out[i]) + ent.out[i] = plugin.Name(ent.out[i]).Normalize() } if !reflect.DeepEqual(hosts, ent.out) { t.Errorf("%s, lookupStaticAddr(%s) = %v; want %v", h.path, ent.in, hosts, h) @@ -221,7 +223,7 @@ func TestHostCacheModification(t *testing.T) { // See https://github.com/golang/go/issues/14212. h := testHostsfile(ipv4hosts) - ent := staticHostEntry{"localhost", []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}, []string{}} + ent := staticHostEntry{"localhost.", []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}, []string{}} testStaticHost(t, ent, h) // Modify the addresses return by lookupStaticHost. addrs := h.LookupStaticHostV6(ent.in) @@ -231,7 +233,7 @@ func TestHostCacheModification(t *testing.T) { testStaticHost(t, ent, h) h = testHostsfile(ipv6hosts) - entip := staticIPEntry{"::1", []string{"localhost"}} + entip := staticIPEntry{"::1", []string{"localhost."}} testStaticAddr(t, entip, h) // Modify the hosts return by lookupStaticAddr. hosts := h.LookupStaticAddr(entip.in) diff --git a/plugin/hosts/setup.go b/plugin/hosts/setup.go index 26c3c82d3df..09f030842d0 100644 --- a/plugin/hosts/setup.go +++ b/plugin/hosts/setup.go @@ -9,6 +9,7 @@ import ( "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/metrics" clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/caddyserver/caddy" @@ -16,17 +17,12 @@ import ( var log = clog.NewWithPlugin("hosts") -func init() { - caddy.RegisterPlugin("hosts", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("hosts", setup) } func periodicHostsUpdate(h *Hosts) chan bool { parseChan := make(chan bool) - if h.options.reload == durationOf0s { + if h.options.reload == 0 { return parseChan } @@ -57,6 +53,12 @@ func setup(c *caddy.Controller) error { return nil }) + c.OnStartup(func() error { + metrics.MustRegister(c, hostsEntries) + metrics.MustRegister(c, hostsReloadTime) + return nil + }) + c.OnShutdown(func() error { close(parseChan) return nil @@ -73,13 +75,12 @@ func setup(c *caddy.Controller) error { func hostsParse(c *caddy.Controller) (Hosts, error) { config := dnsserver.GetConfig(c) - options := newOptions() - h := Hosts{ Hostsfile: &Hostsfile{ path: "/etc/hosts", - hmap: newHostsMap(), - options: options, + hmap: newMap(), + inline: newMap(), + options: newOptions(), }, } @@ -129,7 +130,7 @@ func hostsParse(c *caddy.Controller) (Hosts, error) { case "fallthrough": h.Fall.SetZonesFromArgs(c.RemainingArgs()) case "no_reverse": - options.autoReverse = false + h.options.autoReverse = false case "ttl": remaining := c.RemainingArgs() if len(remaining) < 1 { @@ -142,7 +143,7 @@ func hostsParse(c *caddy.Controller) (Hosts, error) { if ttl <= 0 || ttl > 65535 { return h, c.Errf("ttl provided is invalid") } - options.ttl = uint32(ttl) + h.options.ttl = uint32(ttl) case "reload": remaining := c.RemainingArgs() if len(remaining) != 1 { @@ -152,10 +153,10 @@ func hostsParse(c *caddy.Controller) (Hosts, error) { if err != nil { return h, c.Errf("invalid duration for reload '%s'", remaining[0]) } - if reload < durationOf0s { + if reload < 0 { return h, c.Errf("invalid negative duration for reload '%s'", 
remaining[0]) } - options.reload = reload + h.options.reload = reload default: if len(h.Fall.Zones) == 0 { line := strings.Join(append([]string{c.Val()}, c.RemainingArgs()...), " ") diff --git a/plugin/hosts/setup_test.go b/plugin/hosts/setup_test.go index 0e3800112ae..fd8a8060e70 100644 --- a/plugin/hosts/setup_test.go +++ b/plugin/hosts/setup_test.go @@ -100,7 +100,7 @@ func TestHostsInlineParse(t *testing.T) { tests := []struct { inputFileRules string shouldErr bool - expectedbyAddr map[string][]string + expectedaddr map[string][]string expectedFallthrough fall.F }{ { @@ -148,19 +148,20 @@ func TestHostsInlineParse(t *testing.T) { t.Fatalf("Test %d expected no errors, but got '%v'", i, err) } else if !test.shouldErr { if !h.Fall.Equal(test.expectedFallthrough) { - t.Fatalf("Test %d expected fallthrough of %v, got %v", i, test.expectedFallthrough, h.Fall) + t.Errorf("Test %d expected fallthrough of %v, got %v", i, test.expectedFallthrough, h.Fall) } - for k, expectedVal := range test.expectedbyAddr { - if val, ok := h.hmap.byAddr[k]; !ok { - t.Fatalf("Test %d expected %v, got no entry", i, k) - } else { - if len(expectedVal) != len(val) { - t.Fatalf("Test %d expected %v records for %v, got %v", i, len(expectedVal), k, len(val)) - } - for j := range expectedVal { - if expectedVal[j] != val[j] { - t.Fatalf("Test %d expected %v for %v, got %v", i, expectedVal[j], j, val[j]) - } + for k, expectedVal := range test.expectedaddr { + val, ok := h.inline.addr[k] + if !ok { + t.Errorf("Test %d expected %v, got no entry", i, k) + continue + } + if len(expectedVal) != len(val) { + t.Errorf("Test %d expected %v records for %v, got %v", i, len(expectedVal), k, len(val)) + } + for j := range expectedVal { + if expectedVal[j] != val[j] { + t.Errorf("Test %d expected %v for %v, got %v", i, expectedVal[j], j, val[j]) } } } diff --git a/plugin/import/README.md b/plugin/import/README.md index 7c6a66999d3..9da6e250a45 100644 --- a/plugin/import/README.md +++ b/plugin/import/README.md @@ -2,14 +2,14 @@ ## Name -*import* - include files or reference snippets from a Corefile. +*import* - includes files or references snippets from a Corefile. ## Description -The *import* plugin can be used to include files into the main configuration. Another use it to +The *import* plugin can be used to include files into the main configuration. Another use is to reference predefined snippets. Both can help to avoid some duplication. -This is a unique directive in that *import* can appear outside of a server block. In other words, it +This is a unique plugin in that *import* can appear outside of a server block. In other words, it can appear at the top of a Corefile where an address would normally be. ## Syntax diff --git a/plugin/k8s_external/OWNERS b/plugin/k8s_external/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/k8s_external/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/k8s_external/README.md b/plugin/k8s_external/README.md index f24c9a7719a..dc251ab02ff 100644 --- a/plugin/k8s_external/README.md +++ b/plugin/k8s_external/README.md @@ -2,7 +2,7 @@ ## Name -*k8s_external* - resolve load balancer and external IPs from outside kubernetes clusters. +*k8s_external* - resolves load balancer and external IPs from outside Kubernetes clusters. ## Description @@ -10,10 +10,10 @@ This plugin allows an additional zone to resolve the external IP address(es) of service. This plugin is only useful if the *kubernetes* plugin is also loaded. 
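In the Corefile/Service example further below, a query such as `test.default.example.org.` resolves because the plugin strips the zone `example.org.` and reads the two remaining labels as service and namespace. A minimal, self-contained sketch of that decomposition (the helper name and trimming details are illustrative, not the plugin's actual parser):

~~~ go
package main

import (
	"fmt"
	"strings"
)

// splitExternal is a hypothetical helper: trim the zone from the query name
// and expect exactly "<service>.<namespace>" to remain.
func splitExternal(qname, zone string) (service, namespace string, ok bool) {
	rest := strings.TrimSuffix(strings.TrimSuffix(qname, zone), ".")
	labels := strings.Split(rest, ".")
	if len(labels) != 2 {
		return "", "", false // anything deeper is not a plain service name
	}
	return labels[0], labels[1], true
}

func main() {
	svc, ns, ok := splitExternal("test.default.example.org.", "example.org.")
	fmt.Println(svc, ns, ok) // test default true
}
~~~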
The plugin uses an external zone to resolve in-cluster IP addresses. It only handles queries for A, -AAAA and SRV records, all others result in NODATA responses. To make it a proper DNS zone it handles +AAAA and SRV records; all others result in NODATA responses. To make it a proper DNS zone, it handles SOA and NS queries for the apex of the zone. -By default the apex of the zone will look like (assuming the zone used is `example.org`): +By default the apex of the zone will look like the following (assuming the zone used is `example.org`): ~~~ dns example.org. 5 IN SOA ns1.dns.example.org. hostmaster.example.org. ( @@ -29,11 +29,11 @@ ns1.dns.example.org. 5 IN A .... ns1.dns.example.org. 5 IN AAAA .... ~~~ -Note we use the `dns` subdomain to place the records the DNS needs (see the `apex` directive). Also +Note that we use the `dns` subdomain for the records DNS needs (see the `apex` directive). Also note the SOA's serial number is static. The IP addresses of the nameserver records are those of the CoreDNS service. -The *k8s_external* plugin handles the subdomain `dns` and the apex of the zone by itself, all other +The *k8s_external* plugin handles the subdomain `dns` and the apex of the zone itself; all other queries are resolved to addresses in the cluster. ## Syntax @@ -44,7 +44,7 @@ k8s_external [ZONE...] * **ZONES** zones *k8s_external* should be authoritative for. -If you want to change the apex domain or use a different TTL for the return records you can use +If you want to change the apex domain or use a different TTL for the returned records you can use this extended syntax. ~~~ @@ -54,12 +54,12 @@ k8s_external [ZONE...] { } ~~~ -* **APEX** is the name (DNS label) to use the apex records, defaults to `dns`. +* **APEX** is the name (DNS label) to use for the apex records; it defaults to `dns`. * `ttl` allows you to set a custom **TTL** for responses. The default is 5 (seconds). # Examples -Enable names under `example.org` to be resolved to in cluster DNS addresses. +Enable names under `example.org` to be resolved to in-cluster DNS addresses. ~~~ . { @@ -68,7 +68,7 @@ Enable names under `example.org` to be resolved to in cluster DNS addresses. } ~~~ -With the Corefile above, the following Service will get an `A` record for `test.default.example.org` with IP address `192.168.200.123`. +With the Corefile above, the following Service will get an `A` record for `test.default.example.org` with the IP address `192.168.200.123`. ~~~ apiVersion: v1 diff --git a/plugin/k8s_external/apex.go b/plugin/k8s_external/apex.go index f58894817e5..85edbea6cda 100644 --- a/plugin/k8s_external/apex.go +++ b/plugin/k8s_external/apex.go @@ -20,7 +20,7 @@ func (e *External) serveApex(state request.Request) (int, error) { addr := e.externalAddrFunc(state) for _, rr := range addr { rr.Header().Ttl = e.ttl - rr.Header().Name = state.QName() + rr.Header().Name = dnsutil.Join("ns1", e.apex, state.QName()) m.Extra = append(m.Extra, rr) } default: diff --git a/plugin/k8s_external/apex_test.go b/plugin/k8s_external/apex_test.go index 14aaf64bcb5..2f6923f56c2 100644 --- a/plugin/k8s_external/apex_test.go +++ b/plugin/k8s_external/apex_test.go @@ -59,7 +59,7 @@ var testsApex = []test.Case{ test.NS("example.com. 5 IN NS ns1.dns.example.com."), }, Extra: []dns.RR{ - test.A("example.com. 5 IN A 127.0.0.1"), + test.A("ns1.dns.example.com. 
5 IN A 127.0.0.1"), }, }, { diff --git a/plugin/k8s_external/external_test.go b/plugin/k8s_external/external_test.go index 40c7cb92703..a83a2da720b 100644 --- a/plugin/k8s_external/external_test.go +++ b/plugin/k8s_external/external_test.go @@ -152,7 +152,7 @@ var tests = []test.Case{ type external struct{} func (external) HasSynced() bool { return true } -func (external) Run() { return } +func (external) Run() {} func (external) Stop() error { return nil } func (external) EpIndexReverse(string) []*object.Endpoints { return nil } func (external) SvcIndexReverse(string) []*object.Service { return nil } diff --git a/plugin/k8s_external/setup.go b/plugin/k8s_external/setup.go index ed645c0c859..5c2dce0b2e2 100644 --- a/plugin/k8s_external/setup.go +++ b/plugin/k8s_external/setup.go @@ -9,12 +9,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("k8s_external", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("k8s_external", setup) } func setup(c *caddy.Controller) error { e, err := parse(c) diff --git a/plugin/kubernetes/OWNERS b/plugin/kubernetes/OWNERS deleted file mode 100644 index e662359c926..00000000000 --- a/plugin/kubernetes/OWNERS +++ /dev/null @@ -1,14 +0,0 @@ -reviewers: - - bradbeam - - chrisohaver - - johnbelamaric - - miekg - - rajansandeep - - yongtang -approvers: - - bradbeam - - chrisohaver - - johnbelamaric - - miekg - - rajansandeep - - yongtang diff --git a/plugin/kubernetes/README.md b/plugin/kubernetes/README.md index 50a31801f33..fd3a60b17f6 100644 --- a/plugin/kubernetes/README.md +++ b/plugin/kubernetes/README.md @@ -2,7 +2,7 @@ ## Name -*kubernetes* - enables the reading zone data from a Kubernetes cluster. +*kubernetes* - enables reading zone data from a Kubernetes cluster. ## Description @@ -24,14 +24,13 @@ This plugin can only be used once per Server Block. kubernetes [ZONES...] ~~~ -With only the directive specified, the *kubernetes* plugin will default to the zone specified in +With only the plugin specified, the *kubernetes* plugin will default to the zone specified in the server's block. It will handle all queries in that zone and connect to Kubernetes in-cluster. It will not provide PTR records for services or A records for pods. If **ZONES** is used it specifies all the zones the plugin should be authoritative for. ``` kubernetes [ZONES...] { - resyncperiod DURATION endpoint URL tls CERT KEY CACERT kubeconfig KUBECONFIG CONTEXT @@ -47,8 +46,6 @@ kubernetes [ZONES...] { } ``` -* `resyncperiod` specifies the Kubernetes data API **DURATION** period. By - default resync is disabled (DURATION is zero). * `endpoint` specifies the **URL** for a remote k8s API endpoint. If omitted, it will connect to k8s in-cluster using the cluster service account. * `tls` **CERT** **KEY** **CACERT** are the TLS cert, key and the CA cert file names for remote k8s connection. @@ -212,15 +209,15 @@ or the word "any"), then that label will match all values. The labels that acce * multiple wildcards are allowed in a single query, e.g., `A` Request `*.*.svc.zone.` or `SRV` request `*.*.*.*.svc.zone.` For example, wildcards can be used to resolve all Endpoints for a Service as `A` records. e.g.: `*.service.ns.svc.myzone.local` will return the Endpoint IPs in the Service `service` in namespace `default`: - ``` + +``` *.service.default.svc.cluster.local. 5 IN A 192.168.10.10 *.service.default.svc.cluster.local. 
5 IN A 192.168.25.15
```
-This response can be randomized using the `loadbalance` plugin

 ## Metadata

-The kubernetes plugin will publish the following metadata, if the _metadata_
+The kubernetes plugin will publish the following metadata, if the *metadata*
 plugin is also enabled:

 * kubernetes/endpoint: the endpoint name in the query
@@ -231,3 +228,20 @@ plugin is also enabled:
 * kubernetes/service: the service name in the query
 * kubernetes/client-namespace: the client pod's namespace, if `pods verified` mode is enabled
 * kubernetes/client-pod-name: the client pod's name, if `pods verified` mode is enabled
+
+## Metrics
+
+If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported:
+
+* `coredns_kubernetes_dns_programming_duration_seconds{service_kind}` - Exports the
+  [DNS programming latency SLI](https://github.com/kubernetes/community/blob/master/sig-scalability/slos/dns_programming_latency.md).
+  The metric has the `service_kind` label that identifies the kind of the
+  [kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service).
+  It may take one of three values:
+  * `cluster_ip`
+  * `headless_with_selector`
+  * `headless_without_selector`
+
+## Bugs
+
+The duration metric currently only supports the "headless_with_selector" service kind.
diff --git a/plugin/kubernetes/autopath.go b/plugin/kubernetes/autopath.go
index 06bde0a6904..33bf401f59b 100644
--- a/plugin/kubernetes/autopath.go
+++ b/plugin/kubernetes/autopath.go
@@ -49,7 +49,7 @@ func (k *Kubernetes) AutoPath(state request.Request) []string {
 	return search
 }

-// podWithIP return the api.Pod for source IP ip. It returns nil if nothing can be found.
+// podWithIP returns the api.Pod for source IP. It returns nil if nothing can be found.
 func (k *Kubernetes) podWithIP(ip string) *object.Pod {
 	ps := k.APIConn.PodIndex(ip)
 	if len(ps) == 0 {
diff --git a/plugin/kubernetes/controller.go b/plugin/kubernetes/controller.go
index 63473558499..98f82341be7 100644
--- a/plugin/kubernetes/controller.go
+++ b/plugin/kubernetes/controller.go
@@ -80,7 +80,6 @@ type dnsControl struct {
 type dnsControlOpts struct {
 	initPodCache       bool
 	initEndpointsCache bool
-	resyncPeriod       time.Duration
 	ignoreEmptyService bool

 	// Label handling.
@@ -89,8 +88,9 @@ type dnsControlOpts struct {
 	namespaceLabelSelector *meta.LabelSelector
 	namespaceSelector      labels.Selector

-	zones            []string
-	endpointNameMode bool
+	zones                 []string
+	endpointNameMode      bool
+	skipAPIObjectsCleanup bool
 }

 // newDNSController creates a controller for CoreDNS.
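The controller rework below and the new `plugin/kubernetes/metrics.go` further down exist to feed the DNS programming latency histogram described in the README section above. The measurement itself is small: parse the endpoints' last-change-trigger-time annotation and observe the elapsed time into exponential buckets. A self-contained sketch under those assumptions (the annotation key literal and helper names are illustrative):

~~~ go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// Same bucket layout as this PR: 20 buckets doubling from 1ms (last finite bucket ~524s).
var latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Name:    "dns_programming_duration_seconds",
	Buckets: prometheus.ExponentialBuckets(0.001, 2, 20),
}, []string{"service_kind"})

// observe records how long ago the endpoints controller triggered this change.
func observe(annotations map[string]string) {
	const key = "endpoints.kubernetes.io/last-change-trigger-time" // assumed upstream annotation key
	trigger, err := time.Parse(time.RFC3339Nano, annotations[key])
	if err != nil {
		return // missing or malformed annotation: nothing to measure
	}
	latency.WithLabelValues("headless_with_selector").Observe(time.Since(trigger).Seconds())
}

func main() {
	observe(map[string]string{
		"endpoints.kubernetes.io/last-change-trigger-time": time.Now().Add(-2 * time.Second).Format(time.RFC3339Nano),
	})
	fmt.Println("observed one sample")
}
~~~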
@@ -110,10 +110,9 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns WatchFunc: serviceWatchFunc(dns.client, api.NamespaceAll, dns.selector), }, &api.Service{}, - opts.resyncPeriod, cache.ResourceEventHandlerFuncs{AddFunc: dns.Add, UpdateFunc: dns.Update, DeleteFunc: dns.Delete}, cache.Indexers{svcNameNamespaceIndex: svcNameNamespaceIndexFunc, svcIPIndex: svcIPIndexFunc}, - object.ToService, + object.DefaultProcessor(object.ToService(opts.skipAPIObjectsCleanup)), ) if opts.initPodCache { @@ -123,10 +122,9 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns WatchFunc: podWatchFunc(dns.client, api.NamespaceAll, dns.selector), }, &api.Pod{}, - opts.resyncPeriod, cache.ResourceEventHandlerFuncs{AddFunc: dns.Add, UpdateFunc: dns.Update, DeleteFunc: dns.Delete}, cache.Indexers{podIPIndex: podIPIndexFunc}, - object.ToPod, + object.DefaultProcessor(object.ToPod(opts.skipAPIObjectsCleanup)), ) } @@ -137,10 +135,50 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns WatchFunc: endpointsWatchFunc(dns.client, api.NamespaceAll, dns.selector), }, &api.Endpoints{}, - opts.resyncPeriod, - cache.ResourceEventHandlerFuncs{AddFunc: dns.Add, UpdateFunc: dns.Update, DeleteFunc: dns.Delete}, + cache.ResourceEventHandlerFuncs{}, cache.Indexers{epNameNamespaceIndex: epNameNamespaceIndexFunc, epIPIndex: epIPIndexFunc}, - object.ToEndpoints) + func(clientState cache.Indexer, h cache.ResourceEventHandler) cache.ProcessFunc { + return func(obj interface{}) error { + for _, d := range obj.(cache.Deltas) { + + apiEndpoints, obj := object.ToEndpoints(d.Object) + + switch d.Type { + case cache.Sync, cache.Added, cache.Updated: + if old, exists, err := clientState.Get(obj); err == nil && exists { + if err := clientState.Update(obj); err != nil { + return err + } + h.OnUpdate(old, obj) + // endpoint updates can come frequently, make sure it's a change we care about + if !endpointsEquivalent(old.(*object.Endpoints), obj) { + dns.updateModifed() + recordDNSProgrammingLatency(dns.getServices(obj), apiEndpoints) + } + } else { + if err := clientState.Add(obj); err != nil { + return err + } + h.OnAdd(d.Object) + dns.updateModifed() + recordDNSProgrammingLatency(dns.getServices(obj), apiEndpoints) + } + case cache.Deleted: + if err := clientState.Delete(obj); err != nil { + return err + } + h.OnDelete(d.Object) + dns.updateModifed() + recordDNSProgrammingLatency(dns.getServices(obj), apiEndpoints) + } + if !opts.skipAPIObjectsCleanup { + *apiEndpoints = api.Endpoints{} + } + } + return nil + } + }) + } dns.nsLister, dns.nsController = cache.NewInformer( @@ -149,7 +187,7 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns WatchFunc: namespaceWatchFunc(dns.client, dns.namespaceSelector), }, &api.Namespace{}, - opts.resyncPeriod, + defaultResyncPeriod, cache.ResourceEventHandlerFuncs{}) return &dns @@ -427,17 +465,6 @@ func (dns *dnsControl) detectChanges(oldObj, newObj interface{}) { switch ob := obj.(type) { case *object.Service: dns.updateModifed() - case *object.Endpoints: - if newObj == nil || oldObj == nil { - dns.updateModifed() - return - } - p := oldObj.(*object.Endpoints) - // endpoint updates can come frequently, make sure it's a change we care about - if endpointsEquivalent(p, ob) { - return - } - dns.updateModifed() case *object.Pod: dns.updateModifed() default: @@ -445,6 +472,10 @@ func (dns *dnsControl) detectChanges(oldObj, newObj interface{}) { } } +func (dns *dnsControl) 
getServices(endpoints *object.Endpoints) []*object.Service { + return dns.SvcIndex(object.EndpointsKey(endpoints.GetName(), endpoints.GetNamespace())) +} + // subsetsEquivalent checks if two endpoint subsets are significantly equivalent // I.e. that they have the same ready addresses, host names, ports (including protocol // and service names for SRV) @@ -487,6 +518,9 @@ func subsetsEquivalent(sa, sb object.EndpointSubset) bool { // endpointsEquivalent checks if the update to an endpoint is something // that matters to us or if they are effectively equivalent. func endpointsEquivalent(a, b *object.Endpoints) bool { + if a == nil || b == nil { + return false + } if len(a.Subsets) != len(b.Subsets) { return false @@ -516,3 +550,5 @@ func (dns *dnsControl) updateModifed() { } var errObj = errors.New("obj was not of the correct type") + +const defaultResyncPeriod = 0 diff --git a/plugin/kubernetes/controller_test.go b/plugin/kubernetes/controller_test.go index 37d4705e42c..f945684cb01 100644 --- a/plugin/kubernetes/controller_test.go +++ b/plugin/kubernetes/controller_test.go @@ -78,7 +78,7 @@ func generateEndpoints(cidr string, client kubernetes.Interface) { }, } ep.ObjectMeta.Name = "svc" + strconv.Itoa(count) - _, err = client.CoreV1().Endpoints("testns").Create(ep) + client.CoreV1().Endpoints("testns").Create(ep) count++ } } diff --git a/plugin/kubernetes/external.go b/plugin/kubernetes/external.go index 91a8a2ed159..42495a430b4 100644 --- a/plugin/kubernetes/external.go +++ b/plugin/kubernetes/external.go @@ -21,7 +21,7 @@ func (k *Kubernetes) External(state request.Request) ([]msg.Service, int) { if last < 0 { return nil, dns.RcodeServerFailure } - // We dealing with a fairly normal domain name here, but; we still need to have the service + // We are dealing with a fairly normal domain name here, but we still need to have the service // and the namespace: // service.namespace. // @@ -85,8 +85,10 @@ func (k *Kubernetes) External(state request.Request) ([]msg.Service, int) { // ExternalAddress returns the external service address(es) for the CoreDNS service. func (k *Kubernetes) ExternalAddress(state request.Request) []dns.RR { - // This is probably wrong, because of all the fallback behavior of k.nsAddr, i.e. can get - // an address that isn't reacheable from outside the cluster. - rrs := []dns.RR{k.nsAddr()} - return rrs + // If CoreDNS is running inside the Kubernetes cluster: k.nsAddrs() will return the external IPs of the services + // targeting the CoreDNS Pod. + // If CoreDNS is running outside of the Kubernetes cluster: k.nsAddrs() will return the first non-loopback IP + // address seen on the local system it is running on. This could be the wrong answer if coredns is using the *bind* + // plugin to bind to a different IP address. 
+ return k.nsAddrs(true, state.Zone) } diff --git a/plugin/kubernetes/external_test.go b/plugin/kubernetes/external_test.go index dabbd94896b..7ccbb279886 100644 --- a/plugin/kubernetes/external_test.go +++ b/plugin/kubernetes/external_test.go @@ -79,7 +79,7 @@ func TestExternal(t *testing.T) { type external struct{} func (external) HasSynced() bool { return true } -func (external) Run() { return } +func (external) Run() {} func (external) Stop() error { return nil } func (external) EpIndexReverse(string) []*object.Endpoints { return nil } func (external) SvcIndexReverse(string) []*object.Service { return nil } diff --git a/plugin/kubernetes/federation.go b/plugin/kubernetes/federation.go deleted file mode 100644 index bf169b911d5..00000000000 --- a/plugin/kubernetes/federation.go +++ /dev/null @@ -1,51 +0,0 @@ -package kubernetes - -import ( - "errors" - - "github.com/coredns/coredns/plugin/etcd/msg" - "github.com/coredns/coredns/plugin/pkg/dnsutil" - "github.com/coredns/coredns/request" -) - -// The federation node.Labels keys used. -const ( - // TODO: Do not hardcode these labels. Pull them out of the API instead. - // - // We can get them via .... - // import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - // metav1.LabelZoneFailureDomain - // metav1.LabelZoneRegion - // - // But importing above breaks coredns with flag collision of 'log_dir' - - LabelZone = "failure-domain.beta.kubernetes.io/zone" - LabelRegion = "failure-domain.beta.kubernetes.io/region" -) - -// Federations is used from the federations plugin to return the service that should be -// returned as a CNAME for federation(s) to work. -func (k *Kubernetes) Federations(state request.Request, fname, fzone string) (msg.Service, error) { - nodeName := k.localNodeName() - node, err := k.APIConn.GetNodeByName(nodeName) - if err != nil { - return msg.Service{}, err - } - r, err := parseRequest(state) - if err != nil { - return msg.Service{}, err - } - - lz := node.Labels[LabelZone] - lr := node.Labels[LabelRegion] - - if lz == "" || lr == "" { - return msg.Service{}, errors.New("local node missing zone/region labels") - } - - if r.endpoint == "" { - return msg.Service{Host: dnsutil.Join(r.service, r.namespace, fname, r.podOrSvc, lz, lr, fzone)}, nil - } - - return msg.Service{Host: dnsutil.Join(r.endpoint, r.service, r.namespace, fname, r.podOrSvc, lz, lr, fzone)}, nil -} diff --git a/plugin/kubernetes/handler.go b/plugin/kubernetes/handler.go index 324e08da69c..6ff8040a4f7 100644 --- a/plugin/kubernetes/handler.go +++ b/plugin/kubernetes/handler.go @@ -28,6 +28,8 @@ func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.M ) switch state.QType() { + case dns.TypeAXFR, dns.TypeIXFR: + k.Transfer(ctx, state) case dns.TypeA: records, err = plugin.A(ctx, &k, zone, state, nil, plugin.Options{}) case dns.TypeAAAA: @@ -50,11 +52,11 @@ func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.M break } fallthrough - case dns.TypeAXFR, dns.TypeIXFR: - k.Transfer(ctx, state) default: // Do a fake A lookup, so we can distinguish between NODATA and NXDOMAIN - _, err = plugin.A(ctx, &k, zone, state, nil, plugin.Options{}) + fake := state.NewWithQuestion(state.QName(), dns.TypeA) + fake.Zone = state.Zone + _, err = plugin.A(ctx, &k, zone, fake, nil, plugin.Options{}) } if k.IsNameError(err) { diff --git a/plugin/kubernetes/handler_test.go b/plugin/kubernetes/handler_test.go index 0efd03c0767..186c2a3a609 100644 --- a/plugin/kubernetes/handler_test.go +++ 
b/plugin/kubernetes/handler_test.go @@ -341,6 +341,38 @@ var dnsTestCases = []test.Case{ test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), }, }, + // NS query for qname != zone (existing domain) + { + Qname: "svc.cluster.local.", Qtype: dns.TypeNS, + Rcode: dns.RcodeSuccess, + Ns: []dns.RR{ + test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), + }, + }, + // NS query for qname != zone (existing domain) + { + Qname: "testns.svc.cluster.local.", Qtype: dns.TypeNS, + Rcode: dns.RcodeSuccess, + Ns: []dns.RR{ + test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), + }, + }, + // NS query for qname != zone (non existing domain) + { + Qname: "foo.cluster.local.", Qtype: dns.TypeNS, + Rcode: dns.RcodeNameError, + Ns: []dns.RR{ + test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), + }, + }, + // NS query for qname != zone (non existing domain) + { + Qname: "foo.svc.cluster.local.", Qtype: dns.TypeNS, + Rcode: dns.RcodeNameError, + Ns: []dns.RR{ + test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), + }, + }, } func TestServeDNS(t *testing.T) { @@ -489,7 +521,7 @@ type APIConnServeTest struct { } func (a APIConnServeTest) HasSynced() bool { return !a.notSynced } -func (APIConnServeTest) Run() { return } +func (APIConnServeTest) Run() {} func (APIConnServeTest) Stop() error { return nil } func (APIConnServeTest) EpIndexReverse(string) []*object.Endpoints { return nil } func (APIConnServeTest) SvcIndexReverse(string) []*object.Service { return nil } diff --git a/plugin/kubernetes/kubernetes.go b/plugin/kubernetes/kubernetes.go index 1d9a7ec740c..68176d78c4d 100644 --- a/plugin/kubernetes/kubernetes.go +++ b/plugin/kubernetes/kubernetes.go @@ -43,11 +43,10 @@ type Kubernetes struct { Fall fall.F ttl uint32 opts dnsControlOpts - - primaryZoneIndex int - interfaceAddrsFunc func() net.IP - autoPathSearch []string // Local search path from /etc/resolv.conf. Needed for autopath. - TransferTo []string + primaryZoneIndex int + localIPs []net.IP + autoPathSearch []string // Local search path from /etc/resolv.conf. Needed for autopath. + TransferTo []string } // New returns a initialized Kubernetes. It default interfaceAddrFunc to return 127.0.0.1. All other @@ -56,7 +55,6 @@ func New(zones []string) *Kubernetes { k := new(Kubernetes) k.Zones = zones k.Namespaces = make(map[string]struct{}) - k.interfaceAddrsFunc = func() net.IP { return net.ParseIP("127.0.0.1") } k.podMode = podModeDisabled k.ttl = defaultTTL @@ -68,7 +66,7 @@ const ( podModeDisabled = "disabled" // podModeVerified is where Pod requests are answered only if they exist podModeVerified = "verified" - // podModeInsecure is where pod requests are answered without verfying they exist + // podModeInsecure is where pod requests are answered without verifying they exist podModeInsecure = "insecure" // DNSSchemaVersion is the schema version: https://github.com/kubernetes/dns/blob/master/docs/specification.md DNSSchemaVersion = "1.0.1" @@ -107,25 +105,33 @@ func (k *Kubernetes) Services(ctx context.Context, state request.Request, exact case dns.TypeNS: // We can only get here if the qname equals the zone, see ServeDNS in handler.go. 
- ns := k.nsAddr() - svc := msg.Service{Host: ns.A.String(), Key: msg.Path(state.QName(), coredns), TTL: k.ttl} - return []msg.Service{svc}, nil + nss := k.nsAddrs(false, state.Zone) + var svcs []msg.Service + for _, ns := range nss { + if ns.Header().Rrtype == dns.TypeA { + svcs = append(svcs, msg.Service{Host: ns.(*dns.A).A.String(), Key: msg.Path(ns.Header().Name, coredns), TTL: k.ttl}) + continue + } + if ns.Header().Rrtype == dns.TypeAAAA { + svcs = append(svcs, msg.Service{Host: ns.(*dns.AAAA).AAAA.String(), Key: msg.Path(ns.Header().Name, coredns), TTL: k.ttl}) + } + } + return svcs, nil } if isDefaultNS(state.Name(), state.Zone) { - ns := k.nsAddr() - - isIPv4 := ns.A.To4() != nil - - if !((state.QType() == dns.TypeA && isIPv4) || (state.QType() == dns.TypeAAAA && !isIPv4)) { - // NODATA - return nil, nil + nss := k.nsAddrs(false, state.Zone) + var svcs []msg.Service + for _, ns := range nss { + if ns.Header().Rrtype == dns.TypeA && state.QType() == dns.TypeA { + svcs = append(svcs, msg.Service{Host: ns.(*dns.A).A.String(), Key: msg.Path(state.QName(), coredns), TTL: k.ttl}) + continue + } + if ns.Header().Rrtype == dns.TypeAAAA && state.QType() == dns.TypeAAAA { + svcs = append(svcs, msg.Service{Host: ns.(*dns.AAAA).AAAA.String(), Key: msg.Path(state.QName(), coredns), TTL: k.ttl}) + } } - - // If this is an A request for "ns.dns", respond with a "fake" record for coredns. - // SOA records always use this hardcoded name - svc := msg.Service{Host: ns.A.String(), Key: msg.Path(state.QName(), coredns), TTL: k.ttl} - return []msg.Service{svc}, nil + return svcs, nil } s, e := k.Records(ctx, state, false) @@ -179,7 +185,7 @@ func (k *Kubernetes) getClientConfig() (*rest.Config, error) { } // Connect to API from out of cluster - // Only the first one is used. We will deprecated multiple endpoints later. + // Only the first one is used. We will deprecate multiple endpoints later. 
clusterinfo.Server = k.APIServerList[0] if len(k.APICertAuth) > 0 { diff --git a/plugin/kubernetes/kubernetes_apex_test.go b/plugin/kubernetes/kubernetes_apex_test.go index 9a91ea2e2b8..5d2f4079b1b 100644 --- a/plugin/kubernetes/kubernetes_apex_test.go +++ b/plugin/kubernetes/kubernetes_apex_test.go @@ -2,6 +2,7 @@ package kubernetes import ( "context" + "net" "testing" "github.com/coredns/coredns/plugin/pkg/dnstest" @@ -63,6 +64,7 @@ func TestServeDNSApex(t *testing.T) { k := New([]string{"cluster.local."}) k.APIConn = &APIConnServeTest{} k.Next = test.NextHandler(dns.RcodeSuccess, nil) + k.localIPs = []net.IP{net.ParseIP("127.0.0.1")} ctx := context.TODO() for i, tc := range kubeApexCases { @@ -85,7 +87,7 @@ func TestServeDNSApex(t *testing.T) { } if err := test.SortAndCheck(resp, tc); err != nil { - t.Error(err) + t.Errorf("Test %d: %v", i, err) } } } diff --git a/plugin/kubernetes/kubernetes_test.go b/plugin/kubernetes/kubernetes_test.go index 57b735d7b47..c0cf2535573 100644 --- a/plugin/kubernetes/kubernetes_test.go +++ b/plugin/kubernetes/kubernetes_test.go @@ -61,7 +61,7 @@ func TestEndpointHostname(t *testing.T) { type APIConnServiceTest struct{} func (APIConnServiceTest) HasSynced() bool { return true } -func (APIConnServiceTest) Run() { return } +func (APIConnServiceTest) Run() {} func (APIConnServiceTest) Stop() error { return nil } func (APIConnServiceTest) PodIndex(string) []*object.Pod { return nil } func (APIConnServiceTest) SvcIndexReverse(string) []*object.Service { return nil } @@ -310,31 +310,39 @@ func TestServicesAuthority(t *testing.T) { key string } type svcTest struct { - interfaceAddrs func() net.IP - qname string - qtype uint16 - answer *svcAns + localIPs []net.IP + qname string + qtype uint16 + answer []svcAns } tests := []svcTest{ - {interfaceAddrs: func() net.IP { return net.ParseIP("127.0.0.1") }, qname: "ns.dns.interwebs.test.", qtype: dns.TypeA, answer: &svcAns{host: "127.0.0.1", key: "/" + coredns + "/test/interwebs/dns/ns"}}, - {interfaceAddrs: func() net.IP { return net.ParseIP("127.0.0.1") }, qname: "ns.dns.interwebs.test.", qtype: dns.TypeAAAA}, - {interfaceAddrs: func() net.IP { return net.ParseIP("::1") }, qname: "ns.dns.interwebs.test.", qtype: dns.TypeA}, - {interfaceAddrs: func() net.IP { return net.ParseIP("::1") }, qname: "ns.dns.interwebs.test.", qtype: dns.TypeAAAA, answer: &svcAns{host: "::1", key: "/" + coredns + "/test/interwebs/dns/ns"}}, + {localIPs: []net.IP{net.ParseIP("1.2.3.4")}, qname: "ns.dns.interwebs.test.", qtype: dns.TypeA, answer: []svcAns{{host: "1.2.3.4", key: "/" + coredns + "/test/interwebs/dns/ns"}}}, + {localIPs: []net.IP{net.ParseIP("1.2.3.4")}, qname: "ns.dns.interwebs.test.", qtype: dns.TypeAAAA}, + {localIPs: []net.IP{net.ParseIP("1:2::3:4")}, qname: "ns.dns.interwebs.test.", qtype: dns.TypeA}, + {localIPs: []net.IP{net.ParseIP("1:2::3:4")}, qname: "ns.dns.interwebs.test.", qtype: dns.TypeAAAA, answer: []svcAns{{host: "1:2::3:4", key: "/" + coredns + "/test/interwebs/dns/ns"}}}, + { + localIPs: []net.IP{net.ParseIP("1.2.3.4"), net.ParseIP("1:2::3:4")}, + qname: "ns.dns.interwebs.test.", + qtype: dns.TypeNS, answer: []svcAns{ + {host: "1.2.3.4", key: "/" + coredns + "/test/interwebs/dns/ns"}, + {host: "1:2::3:4", key: "/" + coredns + "/test/interwebs/dns/ns"}, + }, + }, } for i, test := range tests { - k.interfaceAddrsFunc = test.interfaceAddrs + k.localIPs = test.localIPs state := request.Request{ Req: &dns.Msg{Question: []dns.Question{{Name: test.qname, Qtype: test.qtype}}}, - Zone: "interwebs.test.", // must 
match from k.Zones[0]
+			Zone: k.Zones[0],
 		}

 		svcs, e := k.Services(context.TODO(), state, false, plugin.Options{})
 		if e != nil {
 			t.Errorf("Test %d: got error '%v'", i, e)
 			continue
 		}

-		if test.answer != nil && len(svcs) != 1 {
-			t.Errorf("Test %d, expected 1 answer, got %v", i, len(svcs))
+		if test.answer != nil && len(svcs) != len(test.answer) {
+			t.Errorf("Test %d, expected %d answers, got %d", i, len(test.answer), len(svcs))
 			continue
 		}
@@ -347,11 +355,13 @@ func TestServicesAuthority(t *testing.T) {
 			continue
 		}

-		if test.answer.host != svcs[0].Host {
-			t.Errorf("Test %d, expected host '%v', got '%v'", i, test.answer.host, svcs[0].Host)
-		}
-		if test.answer.key != svcs[0].Key {
-			t.Errorf("Test %d, expected key '%v', got '%v'", i, test.answer.key, svcs[0].Key)
+		for j, answer := range test.answer {
+			if answer.host != svcs[j].Host {
+				t.Errorf("Test %d, expected host '%v', got '%v'", i, answer.host, svcs[j].Host)
+			}
+			if answer.key != svcs[j].Key {
+				t.Errorf("Test %d, expected key '%v', got '%v'", i, answer.key, svcs[j].Key)
+			}
 		}
 	}
 }
diff --git a/plugin/kubernetes/local.go b/plugin/kubernetes/local.go
index e15fec497f1..d092550613c 100644
--- a/plugin/kubernetes/local.go
+++ b/plugin/kubernetes/local.go
@@ -2,37 +2,54 @@ package kubernetes

 import (
 	"net"
+
+	"github.com/caddyserver/caddy"
+	"github.com/coredns/coredns/core/dnsserver"
 )

-func localPodIP() net.IP {
-	addrs, err := net.InterfaceAddrs()
-	if err != nil {
-		return nil
+// boundIPs returns the list of non-loopback IPs that CoreDNS is bound to
+func boundIPs(c *caddy.Controller) (ips []net.IP) {
+	conf := dnsserver.GetConfig(c)
+	hosts := conf.ListenHosts
+	if hosts == nil || hosts[0] == "" {
+		hosts = nil
+		addrs, err := net.InterfaceAddrs()
+		if err != nil {
+			return nil
+		}
+		for _, addr := range addrs {
+			hosts = append(hosts, addr.String())
+		}
 	}
-
-	for _, addr := range addrs {
-		ip, _, _ := net.ParseCIDR(addr.String())
-		ip = ip.To4()
-		if ip == nil || ip.IsLoopback() {
+	for _, host := range hosts {
+		ip, _, _ := net.ParseCIDR(host)
+		ip4 := ip.To4()
+		if ip4 != nil && !ip4.IsLoopback() {
+			ips = append(ips, ip4)
 			continue
 		}
-		return ip
+		ip6 := ip.To16()
+		if ip6 != nil && !ip6.IsLoopback() {
+			ips = append(ips, ip6)
+		}
 	}
-	return nil
+	return ips
 }

-func (k *Kubernetes) localNodeName() string {
-	localIP := k.interfaceAddrsFunc()
-	if localIP == nil {
+// LocalNodeName is used exclusively by the federation plugin and will be deprecated later.
+func (k *Kubernetes) LocalNodeName() string {
+	if len(k.localIPs) == 0 {
 		return ""
 	}
-	// Find endpoint matching localIP
-	for _, ep := range k.APIConn.EpIndexReverse(localIP.String()) {
-		for _, eps := range ep.Subsets {
-			for _, addr := range eps.Addresses {
-				if localIP.Equal(net.ParseIP(addr.IP)) {
-					return addr.NodeName
+	// Find the first endpoint matching any localIP
+	for _, localIP := range k.localIPs {
+		for _, ep := range k.APIConn.EpIndexReverse(localIP.String()) {
+			for _, eps := range ep.Subsets {
+				for _, addr := range eps.Addresses {
+					if localIP.Equal(net.ParseIP(addr.IP)) {
+						return addr.NodeName
+					}
 				}
 			}
 		}
 	}
diff --git a/plugin/kubernetes/metrics.go b/plugin/kubernetes/metrics.go
new file mode 100644
index 00000000000..5547add89cc
--- /dev/null
+++ b/plugin/kubernetes/metrics.go
@@ -0,0 +1,75 @@
+package kubernetes
+
+import (
+	"time"
+
+	"github.com/coredns/coredns/plugin"
+	"github.com/coredns/coredns/plugin/kubernetes/object"
+	"github.com/prometheus/client_golang/prometheus"
+	api "k8s.io/api/core/v1"
+)
+
+const (
+	subsystem = "kubernetes"
+)
+
+var (
+	// DnsProgrammingLatency is defined as the time it took to program a DNS instance - from the time
+	// a service or pod has changed to the time the change was propagated and was available to be
+	// served by a DNS server.
+	// The definition of this SLI can be found at https://github.com/kubernetes/community/blob/master/sig-scalability/slos/dns_programming_latency.md
+	// Note that the metric is partially based on the time exported by the endpoints controller on
+	// the master machine. The measurement may be inaccurate if there is a clock drift between the
+	// node and master machine.
+	// The service_kind label can be one of:
+	// * cluster_ip
+	// * headless_with_selector
+	// * headless_without_selector
+	DnsProgrammingLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: plugin.Namespace,
+		Subsystem: subsystem,
+		Name:      "dns_programming_duration_seconds",
+		// From 1 millisecond to ~17 minutes.
+		Buckets: prometheus.ExponentialBuckets(0.001, 2, 20),
+		Help:    "Histogram of the time (in seconds) it took to program a dns instance.",
+	}, []string{"service_kind"})
+
+	// durationSinceFunc returns the duration elapsed since the given time.
+	// Added as a global variable to allow injection for testing.
+	durationSinceFunc = time.Since
+)
+
+func recordDNSProgrammingLatency(svcs []*object.Service, endpoints *api.Endpoints) {
+	// getLastChangeTriggerTime is the time.Time value of the EndpointsLastChangeTriggerTime
+	// annotation stored in the given endpoints object or the "zero" time if the annotation wasn't set
+	var lastChangeTriggerTime time.Time
+	stringVal, ok := endpoints.Annotations[api.EndpointsLastChangeTriggerTime]
+	if ok {
+		ts, err := time.Parse(time.RFC3339Nano, stringVal)
+		if err != nil {
+			log.Warningf("DnsProgrammingLatency cannot be calculated for Endpoints '%s/%s'; invalid %q annotation RFC3339 value of %q",
+				endpoints.GetNamespace(), endpoints.GetName(), api.EndpointsLastChangeTriggerTime, stringVal)
+			// In case of error val = time.Zero, which is ignored in the upstream code.
+		}
+		lastChangeTriggerTime = ts
+	}
+
+	// isHeadless indicates whether the endpoints object belongs to a headless
+	// service (i.e. clusterIp = None). Note that this can be a false negative if the service
+	// informer is lagging, i.e. we may not see a recently created service. Given that services
+	// don't change very often (compared to the much more frequent endpoints changes), cases where
+	// this method will return a wrong answer should be relatively rare. Because of that we
+	// intentionally accept this flaw to keep the solution simple.
+	isHeadless := len(svcs) == 1 && svcs[0].ClusterIP == api.ClusterIPNone
+
+	if endpoints == nil || !isHeadless || lastChangeTriggerTime.IsZero() {
+		return
+	}
+
+	// If we're here it means that the Endpoints object is for a headless service and that
+	// the Endpoints object was created by the endpoints-controller (because the
+	// LastChangeTriggerTime annotation is set). It means that the corresponding service is a
+	// "headless service with selector".
+	DnsProgrammingLatency.WithLabelValues("headless_with_selector").
+		Observe(durationSinceFunc(lastChangeTriggerTime).Seconds())
+}
diff --git a/plugin/kubernetes/metrics_test.go b/plugin/kubernetes/metrics_test.go
new file mode 100644
index 00000000000..96039c62f45
--- /dev/null
+++ b/plugin/kubernetes/metrics_test.go
@@ -0,0 +1,132 @@
+package kubernetes
+
+import (
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/coredns/coredns/plugin/kubernetes/object"
+
+	"github.com/prometheus/client_golang/prometheus/testutil"
+	api "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/fake"
+)
+
+const (
+	namespace = "testns"
+)
+
+func TestDnsProgrammingLatency(t *testing.T) {
+	client := fake.NewSimpleClientset()
+	now := time.Now()
+	controller := newdnsController(client, dnsControlOpts{
+		initEndpointsCache: true,
+		// This is needed as otherwise the fake k8s client doesn't work properly.
+		skipAPIObjectsCleanup: true,
+	})
+	durationSinceFunc = func(t time.Time) time.Duration {
+		return now.Sub(t)
+	}
+	DnsProgrammingLatency.Reset()
+	go controller.Run()
+
+	subset1 := []api.EndpointSubset{{
+		Addresses: []api.EndpointAddress{{IP: "1.2.3.4", Hostname: "foo"}},
+	}}
+
+	subset2 := []api.EndpointSubset{{
+		Addresses: []api.EndpointAddress{{IP: "1.2.3.5", Hostname: "foo"}},
+	}}
+
+	createService(t, client, controller, "my-service", api.ClusterIPNone)
+	createEndpoints(t, client, "my-service", now.Add(-2*time.Second), subset1)
+	updateEndpoints(t, client, "my-service", now.Add(-1*time.Second), subset2)
+
+	createEndpoints(t, client, "endpoints-no-service", now.Add(-4*time.Second), nil)
+
+	createService(t, client, controller, "clusterIP-service", "10.40.0.12")
+	createEndpoints(t, client, "clusterIP-service", now.Add(-8*time.Second), nil)
+
+	createService(t, client, controller, "headless-no-annotation", api.ClusterIPNone)
+	createEndpoints(t, client, "headless-no-annotation", nil, nil)
+
+	createService(t, client, controller, "headless-wrong-annotation", api.ClusterIPNone)
+	createEndpoints(t, client, "headless-wrong-annotation", "wrong-value", nil)
+
+	controller.Stop()
+	expected := `
+	# HELP coredns_kubernetes_dns_programming_duration_seconds Histogram of the time (in seconds) it took to program a dns instance.
+ # TYPE coredns_kubernetes_dns_programming_duration_seconds histogram + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.001"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.002"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.004"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.008"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.016"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.032"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.064"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.128"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.256"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.512"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="1.024"} 1 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="2.048"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="4.096"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="8.192"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="16.384"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="32.768"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="65.536"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="131.072"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="262.144"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="524.288"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="+Inf"} 2 + coredns_kubernetes_dns_programming_duration_seconds_sum{service_kind="headless_with_selector"} 3 + coredns_kubernetes_dns_programming_duration_seconds_count{service_kind="headless_with_selector"} 2 + ` + if err := testutil.CollectAndCompare(DnsProgrammingLatency, strings.NewReader(expected)); err != nil { + t.Error(err) + } +} + +func buildEndpoints(name string, lastChangeTriggerTime interface{}, subsets []api.EndpointSubset) *api.Endpoints { + annotations := make(map[string]string) + switch v := lastChangeTriggerTime.(type) { + case string: + annotations[api.EndpointsLastChangeTriggerTime] = v + case time.Time: + annotations[api.EndpointsLastChangeTriggerTime] = v.Format(time.RFC3339Nano) + } + return &api.Endpoints{ + ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: name, Annotations: annotations}, + Subsets: subsets, + } +} + +func createEndpoints(t *testing.T, client kubernetes.Interface, name string, triggerTime interface{}, subsets []api.EndpointSubset) { + _, err := client.CoreV1().Endpoints(namespace).Create(buildEndpoints(name, triggerTime, subsets)) + if err != nil { 
+ t.Fatal(err) + } +} + +func updateEndpoints(t *testing.T, client kubernetes.Interface, name string, triggerTime interface{}, subsets []api.EndpointSubset) { + _, err := client.CoreV1().Endpoints(namespace).Update(buildEndpoints(name, triggerTime, subsets)) + if err != nil { + t.Fatal(err) + } +} + +func createService(t *testing.T, client kubernetes.Interface, controller dnsController, name string, clusterIp string) { + if _, err := client.CoreV1().Services(namespace).Create(&api.Service{ + ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: name}, + Spec: api.ServiceSpec{ClusterIP: clusterIp}, + }); err != nil { + t.Fatal(err) + } + if err := wait.PollImmediate(10*time.Millisecond, 10*time.Second, func() (bool, error) { + return len(controller.SvcIndex(object.ServiceKey(name, namespace))) == 1, nil + }); err != nil { + t.Fatal(err) + } +} diff --git a/plugin/kubernetes/ns.go b/plugin/kubernetes/ns.go index f3d33ee22d2..2d4bc398a5c 100644 --- a/plugin/kubernetes/ns.go +++ b/plugin/kubernetes/ns.go @@ -4,6 +4,7 @@ import ( "net" "strings" + "github.com/coredns/coredns/plugin/kubernetes/object" "github.com/miekg/dns" api "k8s.io/api/core/v1" ) @@ -12,54 +13,78 @@ func isDefaultNS(name, zone string) bool { return strings.Index(name, defaultNSName) == 0 && strings.Index(name, zone) == len(defaultNSName) } -// nsAddr return the A record for the CoreDNS service in the cluster. If it fails that it fallsback -// on the local address of the machine we're running on. -// -// This function is rather expensive to run. -func (k *Kubernetes) nsAddr() *dns.A { +// nsAddrs returns the A or AAAA records for the CoreDNS service in the cluster. If the service cannot be found, +// it returns a record for the local address of the machine we're running on. 
+func (k *Kubernetes) nsAddrs(external bool, zone string) []dns.RR { var ( - svcName string - svcNamespace string + svcNames []string + svcIPs []net.IP ) - rr := new(dns.A) - localIP := k.interfaceAddrsFunc() - rr.A = localIP + // Find the CoreDNS Endpoints + for _, localIP := range k.localIPs { + endpoints := k.APIConn.EpIndexReverse(localIP.String()) -FindEndpoint: - for _, ep := range k.APIConn.EpIndexReverse(localIP.String()) { - for _, eps := range ep.Subsets { - for _, addr := range eps.Addresses { - if localIP.Equal(net.ParseIP(addr.IP)) { - svcNamespace = ep.Namespace - svcName = ep.Name - break FindEndpoint + // Collect IPs for all Services of the Endpoints + for _, endpoint := range endpoints { + svcs := k.APIConn.SvcIndex(object.ServiceKey(endpoint.Name, endpoint.Namespace)) + for _, svc := range svcs { + if external { + svcName := strings.Join([]string{svc.Name, svc.Namespace, zone}, ".") + for _, exIP := range svc.ExternalIPs { + svcNames = append(svcNames, svcName) + svcIPs = append(svcIPs, net.ParseIP(exIP)) + } + continue + } + svcName := strings.Join([]string{svc.Name, svc.Namespace, Svc, zone}, ".") + if svc.ClusterIP == api.ClusterIPNone { + // For a headless service, use the endpoints IPs + for _, s := range endpoint.Subsets { + for _, a := range s.Addresses { + svcNames = append(svcNames, endpointHostname(a, k.endpointNameMode)+"."+svcName) + svcIPs = append(svcIPs, net.ParseIP(a.IP)) + } + } + } else { + svcNames = append(svcNames, svcName) + svcIPs = append(svcIPs, net.ParseIP(svc.ClusterIP)) } } } } - if len(svcName) == 0 { - rr.Hdr.Name = defaultNSName - rr.A = localIP - return rr + // If no local IPs matched any endpoints, use the localIPs directly + if len(svcIPs) == 0 { + svcIPs = make([]net.IP, len(k.localIPs)) + svcNames = make([]string, len(k.localIPs)) + for i, localIP := range k.localIPs { + svcNames[i] = defaultNSName + zone + svcIPs[i] = localIP + } } -FindService: - for _, svc := range k.APIConn.ServiceList() { - if svcName == svc.Name && svcNamespace == svc.Namespace { - if svc.ClusterIP == api.ClusterIPNone { - rr.A = localIP - } else { - rr.A = net.ParseIP(svc.ClusterIP) - } - break FindService + // Create an RR slice of collected IPs + rrs := make([]dns.RR, len(svcIPs)) + for i, ip := range svcIPs { + if ip.To4() == nil { + rr := new(dns.AAAA) + rr.Hdr.Class = dns.ClassINET + rr.Hdr.Rrtype = dns.TypeAAAA + rr.Hdr.Name = svcNames[i] + rr.AAAA = ip + rrs[i] = rr + continue } + rr := new(dns.A) + rr.Hdr.Class = dns.ClassINET + rr.Hdr.Rrtype = dns.TypeA + rr.Hdr.Name = svcNames[i] + rr.A = ip + rrs[i] = rr } - rr.Hdr.Name = strings.Join([]string{svcName, svcNamespace, "svc."}, ".") - - return rr + return rrs } const defaultNSName = "ns.dns." 
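The A-versus-AAAA split at the end of `nsAddrs` hinges on `net.IP.To4()`, which returns nil for any address that cannot be represented in four bytes. A minimal standalone sketch of that record-building step (illustrative only; `mkRR` and the sample names are hypothetical helpers, not part of this change):

```go
package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

// mkRR mirrors the final loop of nsAddrs: IPv4 addresses become A records,
// everything else becomes AAAA records, with the owner name set per service.
func mkRR(name string, ip net.IP) dns.RR {
	hdr := dns.RR_Header{Name: name, Class: dns.ClassINET}
	if ip.To4() != nil {
		hdr.Rrtype = dns.TypeA
		return &dns.A{Hdr: hdr, A: ip}
	}
	hdr.Rrtype = dns.TypeAAAA
	return &dns.AAAA{Hdr: hdr, AAAA: ip}
}

func main() {
	fmt.Println(mkRR("dns-service.kube-system.svc.cluster.local.", net.ParseIP("10.0.0.111")))
	fmt.Println(mkRR("dns6-service.kube-system.svc.cluster.local.", net.ParseIP("10::111")))
}
```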
diff --git a/plugin/kubernetes/ns_test.go b/plugin/kubernetes/ns_test.go index df7cf5f83d5..bafe53240e7 100644 --- a/plugin/kubernetes/ns_test.go +++ b/plugin/kubernetes/ns_test.go @@ -1,9 +1,11 @@ package kubernetes import ( + "net" "testing" "github.com/coredns/coredns/plugin/kubernetes/object" + "github.com/miekg/dns" api "k8s.io/api/core/v1" ) @@ -11,15 +13,26 @@ import ( type APIConnTest struct{} func (APIConnTest) HasSynced() bool { return true } -func (APIConnTest) Run() { return } +func (APIConnTest) Run() {} func (APIConnTest) Stop() error { return nil } func (APIConnTest) PodIndex(string) []*object.Pod { return nil } -func (APIConnTest) SvcIndex(string) []*object.Service { return nil } func (APIConnTest) SvcIndexReverse(string) []*object.Service { return nil } func (APIConnTest) EpIndex(string) []*object.Endpoints { return nil } func (APIConnTest) EndpointsList() []*object.Endpoints { return nil } func (APIConnTest) Modified() int64 { return 0 } +func (a APIConnTest) SvcIndex(s string) []*object.Service { + switch s { + case "dns-service.kube-system": + return []*object.Service{a.ServiceList()[0]} + case "hdls-dns-service.kube-system": + return []*object.Service{a.ServiceList()[1]} + case "dns6-service.kube-system": + return []*object.Service{a.ServiceList()[2]} + } + return nil +} + func (APIConnTest) ServiceList() []*object.Service { svcs := []*object.Service{ { @@ -27,18 +40,31 @@ func (APIConnTest) ServiceList() []*object.Service { Namespace: "kube-system", ClusterIP: "10.0.0.111", }, + { + Name: "hdls-dns-service", + Namespace: "kube-system", + ClusterIP: api.ClusterIPNone, + }, + { + Name: "dns6-service", + Namespace: "kube-system", + ClusterIP: "10::111", + }, } return svcs } -func (APIConnTest) EpIndexReverse(string) []*object.Endpoints { +func (APIConnTest) EpIndexReverse(ip string) []*object.Endpoints { + if ip != "10.244.0.20" { + return nil + } eps := []*object.Endpoints{ { Subsets: []object.EndpointSubset{ { Addresses: []object.EndpointAddress{ { - IP: "127.0.0.1", + IP: "10.244.0.20", }, }, }, @@ -46,6 +72,32 @@ func (APIConnTest) EpIndexReverse(string) []*object.Endpoints { Name: "dns-service", Namespace: "kube-system", }, + { + Subsets: []object.EndpointSubset{ + { + Addresses: []object.EndpointAddress{ + { + IP: "10.244.0.20", + }, + }, + }, + }, + Name: "hdls-dns-service", + Namespace: "kube-system", + }, + { + Subsets: []object.EndpointSubset{ + { + Addresses: []object.EndpointAddress{ + { + IP: "10.244.0.20", + }, + }, + }, + }, + Name: "dns6-service", + Namespace: "kube-system", + }, } return eps } @@ -55,19 +107,43 @@ func (APIConnTest) GetNamespaceByName(name string) (*api.Namespace, error) { return &api.Namespace{}, nil } -func TestNsAddr(t *testing.T) { +func TestNsAddrs(t *testing.T) { k := New([]string{"inter.webs.test."}) k.APIConn = &APIConnTest{} + k.localIPs = []net.IP{net.ParseIP("10.244.0.20")} - cdr := k.nsAddr() - expected := "10.0.0.111" + cdrs := k.nsAddrs(false, k.Zones[0]) - if cdr.A.String() != expected { - t.Errorf("Expected A to be %q, got %q", expected, cdr.A.String()) + if len(cdrs) != 3 { + t.Fatalf("Expected 3 results, got %v", len(cdrs)) + + } + cdr := cdrs[0] + expected := "10.0.0.111" + if cdr.(*dns.A).A.String() != expected { + t.Errorf("Expected 1st A to be %q, got %q", expected, cdr.(*dns.A).A.String()) + } + expected = "dns-service.kube-system.svc.inter.webs.test." 
+	if cdr.Header().Name != expected {
+		t.Errorf("Expected 1st Header Name to be %q, got %q", expected, cdr.Header().Name)
+	}
+	cdr = cdrs[1]
+	expected = "10.244.0.20"
+	if cdr.(*dns.A).A.String() != expected {
+		t.Errorf("Expected 2nd A to be %q, got %q", expected, cdr.(*dns.A).A.String())
+	}
+	expected = "10-244-0-20.hdls-dns-service.kube-system.svc.inter.webs.test."
+	if cdr.Header().Name != expected {
+		t.Errorf("Expected 2nd Header Name to be %q, got %q", expected, cdr.Header().Name)
+	}
+	cdr = cdrs[2]
+	expected = "10::111"
+	if cdr.(*dns.AAAA).AAAA.String() != expected {
+		t.Errorf("Expected AAAA to be %q, got %q", expected, cdr.(*dns.AAAA).AAAA.String())
 	}
-	expected = "dns-service.kube-system.svc."
-	if cdr.Hdr.Name != expected {
-		t.Errorf("Expected Hdr.Name to be %q, got %q", expected, cdr.Hdr.Name)
+	expected = "dns6-service.kube-system.svc.inter.webs.test."
+	if cdr.Header().Name != expected {
+		t.Errorf("Expected AAAA Header Name to be %q, got %q", expected, cdr.Header().Name)
 	}
 }
diff --git a/plugin/kubernetes/object/endpoint.go b/plugin/kubernetes/object/endpoint.go
index aa93b4ceb50..c7d6b7323c4 100644
--- a/plugin/kubernetes/object/endpoint.go
+++ b/plugin/kubernetes/object/endpoint.go
@@ -43,11 +43,11 @@ type EndpointPort struct {
 // EndpointsKey return a string using for the index.
 func EndpointsKey(name, namespace string) string { return name + "." + namespace }
-// ToEndpoints converts an api.Service to a *Service.
-func ToEndpoints(obj interface{}) interface{} {
+// ToEndpoints converts an api.Endpoints to a *Endpoints.
+func ToEndpoints(obj interface{}) (*api.Endpoints, *Endpoints) {
 	end, ok := obj.(*api.Endpoints)
 	if !ok {
-		return nil
+		return nil, nil
 	}
 	e := &Endpoints{
@@ -62,7 +62,7 @@ func ToEndpoints(obj interface{}) interface{} {
 			Addresses: make([]EndpointAddress, len(eps.Addresses)),
 		}
 		if len(eps.Ports) == 0 {
-			// Add sentinal if there are no ports.
+			// Add sentinel if there are no ports.
 			sub.Ports = []EndpointPort{{Port: -1}}
 		} else {
 			sub.Ports = make([]EndpointPort, len(eps.Ports))
@@ -93,9 +93,7 @@ func ToEndpoints(obj interface{}) interface{} {
 		}
 	}
-	*end = api.Endpoints{}
-
-	return e
+	return end, e
 }
 // CopyWithoutSubsets copies e, without the subsets.
diff --git a/plugin/kubernetes/object/informer.go b/plugin/kubernetes/object/informer.go
index 9336571dc10..bd4d05d3055 100644
--- a/plugin/kubernetes/object/informer.go
+++ b/plugin/kubernetes/object/informer.go
@@ -1,25 +1,29 @@
 package object
 import (
-	"time"
-
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/cache"
 )
-// NewIndexerInformer is a copy of the cache.NewIndexInformer function, but allows Process to have a conversion function (ToFunc).
-func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h cache.ResourceEventHandler, indexers cache.Indexers, convert ToFunc) (cache.Indexer, cache.Controller) {
+// NewIndexerInformer is a copy of the cache.NewIndexerInformer function, but allows a custom process function.
+func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, h cache.ResourceEventHandler, indexers cache.Indexers, builder ProcessorBuilder) (cache.Indexer, cache.Controller) {
 	clientState := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, indexers)
-	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, clientState)
-
 	cfg := &cache.Config{
-		Queue:            fifo,
+		Queue:            cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, clientState),
 		ListerWatcher:    lw,
 		ObjectType:       objType,
-		FullResyncPeriod: resyncPeriod,
+		FullResyncPeriod: defaultResyncPeriod,
 		RetryOnError:     false,
-		Process: func(obj interface{}) error {
+		Process:          builder(clientState, h),
+	}
+	return clientState, cache.New(cfg)
+}
+
+// DefaultProcessor is a copy of the Process function from cache.NewIndexerInformer, except it does a conversion.
+func DefaultProcessor(convert ToFunc) ProcessorBuilder {
+	return func(clientState cache.Indexer, h cache.ResourceEventHandler) cache.ProcessFunc {
+		return func(obj interface{}) error {
 			for _, d := range obj.(cache.Deltas) {
 				obj := convert(d.Object)
@@ -45,7 +49,8 @@ func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPe
 				}
 			}
 			return nil
-		},
+		}
 	}
-	return clientState, cache.New(cfg)
 }
+
+const defaultResyncPeriod = 0
diff --git a/plugin/kubernetes/object/object.go b/plugin/kubernetes/object/object.go
index 3809aaaf468..132b5be6c6a 100644
--- a/plugin/kubernetes/object/object.go
+++ b/plugin/kubernetes/object/object.go
@@ -16,18 +16,22 @@
 package object
 import (
-	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/cache"
 )
 // ToFunc converts one empty interface to another.
 type ToFunc func(interface{}) interface{}
+// ProcessorBuilder returns a function to process cache events.
+type ProcessorBuilder func(cache.Indexer, cache.ResourceEventHandler) cache.ProcessFunc
+
 // Empty is an empty struct.
 type Empty struct{}
-// GetObjectKind implementss the ObjectKind interface as a noop.
+// GetObjectKind implements the ObjectKind interface as a noop.
 func (e *Empty) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind }
 // GetGenerateName implements the metav1.Object interface.
diff --git a/plugin/kubernetes/object/pod.go b/plugin/kubernetes/object/pod.go
index 072d8d56d87..9fc9b5726f9 100644
--- a/plugin/kubernetes/object/pod.go
+++ b/plugin/kubernetes/object/pod.go
@@ -16,8 +16,14 @@ type Pod struct {
 	*Empty
 }
-// ToPod converts an api.Pod to a *Pod.
-func ToPod(obj interface{}) interface{} {
+// ToPod returns a function that converts an api.Pod to a *Pod.
+func ToPod(skipCleanup bool) ToFunc { + return func(obj interface{}) interface{} { + return toPod(skipCleanup, obj) + } +} + +func toPod(skipCleanup bool, obj interface{}) interface{} { pod, ok := obj.(*api.Pod) if !ok { return nil @@ -35,7 +41,9 @@ func ToPod(obj interface{}) interface{} { return nil } - *pod = api.Pod{} + if !skipCleanup { + *pod = api.Pod{} + } return p } diff --git a/plugin/kubernetes/object/service.go b/plugin/kubernetes/object/service.go index 3c830436248..295715e2dc0 100644 --- a/plugin/kubernetes/object/service.go +++ b/plugin/kubernetes/object/service.go @@ -26,8 +26,14 @@ type Service struct { // ServiceKey return a string using for the index. func ServiceKey(name, namespace string) string { return name + "." + namespace } -// ToService converts an api.Service to a *Service. -func ToService(obj interface{}) interface{} { +// ToService returns a function that converts an api.Service to a *Service. +func ToService(skipCleanup bool) ToFunc { + return func(obj interface{}) interface{} { + return toService(skipCleanup, obj) + } +} + +func toService(skipCleanup bool, obj interface{}) interface{} { svc, ok := obj.(*api.Service) if !ok { return nil @@ -46,7 +52,7 @@ func ToService(obj interface{}) interface{} { } if len(svc.Spec.Ports) == 0 { - // Add sentinal if there are no ports. + // Add sentinel if there are no ports. s.Ports = []api.ServicePort{{Port: -1}} } else { s.Ports = make([]api.ServicePort, len(svc.Spec.Ports)) @@ -58,7 +64,9 @@ func ToService(obj interface{}) interface{} { s.ExternalIPs[li+i] = lb.IP } - *svc = api.Service{} + if !skipCleanup { + *svc = api.Service{} + } return s } diff --git a/plugin/kubernetes/parse.go b/plugin/kubernetes/parse.go index 60d2d340242..c16adc4cab3 100644 --- a/plugin/kubernetes/parse.go +++ b/plugin/kubernetes/parse.go @@ -78,7 +78,7 @@ func parseRequest(state request.Request) (r recordRequest, err error) { return r, nil } - // Because of ambiquity we check the labels left: 1: an endpoint. 2: port and protocol. + // Because of ambiguity we check the labels left: 1: an endpoint. 2: port and protocol. // Anything else is a query that is too long to answer and can safely be delegated to return an nxdomain. 
switch last {
diff --git a/plugin/kubernetes/parse_test.go b/plugin/kubernetes/parse_test.go
index 0ce4b7d98da..6fc635477e2 100644
--- a/plugin/kubernetes/parse_test.go
+++ b/plugin/kubernetes/parse_test.go
@@ -37,7 +37,7 @@ func TestParseRequest(t *testing.T) {
 		}
 		rs := r.String()
 		if rs != tc.expected {
-			t.Errorf("Test %d, expected (stringyfied) recordRequest: %s, got %s", i, tc.expected, rs)
+			t.Errorf("Test %d, expected (stringified) recordRequest: %s, got %s", i, tc.expected, rs)
 		}
 	}
 }
diff --git a/plugin/kubernetes/reverse_test.go b/plugin/kubernetes/reverse_test.go
index 3c88fd4d51c..2af72522e8f 100644
--- a/plugin/kubernetes/reverse_test.go
+++ b/plugin/kubernetes/reverse_test.go
@@ -16,7 +16,7 @@ import (
 type APIConnReverseTest struct{}
 func (APIConnReverseTest) HasSynced() bool { return true }
-func (APIConnReverseTest) Run()            { return }
+func (APIConnReverseTest) Run()            {}
 func (APIConnReverseTest) Stop() error     { return nil }
 func (APIConnReverseTest) PodIndex(string) []*object.Pod { return nil }
 func (APIConnReverseTest) EpIndex(string) []*object.Endpoints { return nil }
diff --git a/plugin/kubernetes/setup.go b/plugin/kubernetes/setup.go
index 028f4ae772c..b35d653e11e 100644
--- a/plugin/kubernetes/setup.go
+++ b/plugin/kubernetes/setup.go
@@ -2,7 +2,6 @@ package kubernetes
 import (
 	"errors"
-	"flag"
 	"fmt"
 	"os"
 	"strconv"
@@ -11,6 +10,7 @@ import (
 	"github.com/coredns/coredns/core/dnsserver"
 	"github.com/coredns/coredns/plugin"
+	"github.com/coredns/coredns/plugin/metrics"
 	"github.com/coredns/coredns/plugin/pkg/dnsutil"
 	clog "github.com/coredns/coredns/plugin/pkg/log"
 	"github.com/coredns/coredns/plugin/pkg/parse"
@@ -20,7 +20,7 @@ import (
 	"github.com/miekg/dns"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
-	// Pull this in for logtostderr flag parsing
+	// Pull this in to set klog's output to stdout
 	"k8s.io/klog"
 	// Excluding azure because it is failing to compile
@@ -35,26 +35,10 @@ import (
 var log = clog.NewWithPlugin("kubernetes")
-func init() {
-	// Kubernetes plugin uses the kubernetes library, which now uses klog, we must set and parse this flag
-	// so we don't log to the filesystem, which can fill up and crash CoreDNS indirectly by calling os.Exit().
-	// We also set: os.Stderr = os.Stdout in the setup function below so we output to standard out; as we do for
-	// all CoreDNS logging. We can't do *that* in the init function, because we, when starting, also barf some
-	// things to stderr.
-	klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
-	klog.InitFlags(klogFlags)
-	logtostderr := klogFlags.Lookup("logtostderr")
-	logtostderr.Value.Set("true")
-
-	caddy.RegisterPlugin("kubernetes", caddy.Plugin{
-		ServerType: "dns",
-		Action:     setup,
-	})
-}
+func init() { plugin.Register("kubernetes", setup) }
 func setup(c *caddy.Controller) error {
-	// See comment in the init function.
- os.Stderr = os.Stdout + klog.SetOutput(os.Stdout) k, err := kubernetesParse(c) if err != nil { @@ -68,11 +52,22 @@ func setup(c *caddy.Controller) error { k.RegisterKubeCache(c) + c.OnStartup(func() error { + metrics.MustRegister(c, DnsProgrammingLatency) + return nil + }) + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { k.Next = next return k }) + // get locally bound addresses + c.OnStartup(func() error { + k.localIPs = boundIPs(c) + return nil + }) + return nil } @@ -125,13 +120,11 @@ func kubernetesParse(c *caddy.Controller) (*Kubernetes, error) { func ParseStanza(c *caddy.Controller) (*Kubernetes, error) { k8s := New([]string{""}) - k8s.interfaceAddrsFunc = localPodIP k8s.autoPathSearch = searchFromResolvConf() opts := dnsControlOpts{ initEndpointsCache: true, ignoreEmptyService: false, - resyncPeriod: defaultResyncPeriod, } k8s.opts = opts @@ -197,7 +190,7 @@ func ParseStanza(c *caddy.Controller) (*Kubernetes, error) { case "endpoint": args := c.RemainingArgs() if len(args) > 0 { - // Multiple endoints are deprecated but still could be specified, + // Multiple endpoints are deprecated but still could be specified, // only the first one be used, though k8s.APIServerList = args if len(args) > 1 { @@ -214,16 +207,7 @@ func ParseStanza(c *caddy.Controller) (*Kubernetes, error) { } return nil, c.ArgErr() case "resyncperiod": - args := c.RemainingArgs() - if len(args) > 0 { - rp, err := time.ParseDuration(args[0]) - if err != nil { - return nil, fmt.Errorf("unable to parse resync duration value: '%v': %v", args[0], err) - } - k8s.opts.resyncPeriod = rp - continue - } - return nil, c.ArgErr() + continue case "labels": args := c.RemainingArgs() if len(args) > 0 { @@ -322,5 +306,3 @@ func searchFromResolvConf() []string { plugin.Zones(rc.Search).Normalize() return rc.Search } - -const defaultResyncPeriod = 0 diff --git a/plugin/kubernetes/setup_test.go b/plugin/kubernetes/setup_test.go index fc09f04766f..634401d9d5c 100644 --- a/plugin/kubernetes/setup_test.go +++ b/plugin/kubernetes/setup_test.go @@ -3,7 +3,6 @@ package kubernetes import ( "strings" "testing" - "time" "github.com/coredns/coredns/plugin/pkg/fall" @@ -13,14 +12,13 @@ import ( func TestKubernetesParse(t *testing.T) { tests := []struct { - input string // Corefile data as string - shouldErr bool // true if test case is expected to produce an error. - expectedErrContent string // substring from the expected error. Empty for positive cases. - expectedZoneCount int // expected count of defined zones. - expectedNSCount int // expected count of namespaces. - expectedResyncPeriod time.Duration // expected resync period value - expectedLabelSelector string // expected label selector value - expectedNamespaceLabelSelector string // expected namespace label selector value + input string // Corefile data as string + shouldErr bool // true if test case is expected to produce an error. + expectedErrContent string // substring from the expected error. Empty for positive cases. + expectedZoneCount int // expected count of defined zones. + expectedNSCount int // expected count of namespaces. 
+ expectedLabelSelector string // expected label selector value + expectedNamespaceLabelSelector string // expected namespace label selector value expectedPodMode string expectedFallthrough fall.F }{ @@ -31,7 +29,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -43,7 +40,6 @@ func TestKubernetesParse(t *testing.T) { "", 2, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -56,7 +52,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -70,7 +65,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -84,7 +78,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 1, - defaultResyncPeriod, "", "", podModeDisabled, @@ -98,35 +91,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 2, - defaultResyncPeriod, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod 30s -}`, - false, - "", - 1, - 0, - 30 * time.Second, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod 15m -}`, - false, - "", - 1, - 0, - 15 * time.Minute, "", "", podModeDisabled, @@ -140,7 +104,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "environment=prod", "", podModeDisabled, @@ -154,7 +117,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "application=nginx,environment in (production,qa,staging)", "", podModeDisabled, @@ -168,7 +130,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "istio-injection=enabled", podModeDisabled, @@ -183,7 +144,6 @@ func TestKubernetesParse(t *testing.T) { "Error during parsing: namespaces and namespace_labels cannot both be set", -1, 0, - defaultResyncPeriod, "", "istio-injection=enabled", podModeDisabled, @@ -191,7 +151,6 @@ func TestKubernetesParse(t *testing.T) { }, { `kubernetes coredns.local test.local { - resyncperiod 15m endpoint http://localhost:8080 namespaces demo test labels environment in (production, staging, qa),application=nginx @@ -201,7 +160,6 @@ func TestKubernetesParse(t *testing.T) { "", 2, 2, - 15 * time.Minute, "application=nginx,environment in (production,qa,staging)", "", podModeDisabled, @@ -216,7 +174,6 @@ func TestKubernetesParse(t *testing.T) { "rong argument count or unexpected line ending", -1, -1, - defaultResyncPeriod, "", "", podModeDisabled, @@ -230,49 +187,6 @@ func TestKubernetesParse(t *testing.T) { "rong argument count or unexpected line ending", -1, -1, - defaultResyncPeriod, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod -}`, - true, - "rong argument count or unexpected line ending", - -1, - 0, - 0 * time.Minute, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod 15 -}`, - true, - "unable to parse resync duration value", - -1, - 0, - 0 * time.Second, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod abc -}`, - true, - "unable to parse resync duration value", - -1, - 0, - 0 * time.Second, "", "", podModeDisabled, @@ -286,7 +200,6 @@ func TestKubernetesParse(t *testing.T) { "rong argument count or unexpected line ending", -1, 0, - 0 * time.Second, "", "", podModeDisabled, @@ -300,7 +213,6 @@ func TestKubernetesParse(t *testing.T) { "unable to parse label selector", -1, 0, - 0 * time.Second, "", "", podModeDisabled, @@ -315,7 +227,6 @@ func TestKubernetesParse(t *testing.T) 
{ "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -330,7 +241,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeInsecure, @@ -345,7 +255,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeVerified, @@ -360,7 +269,6 @@ func TestKubernetesParse(t *testing.T) { "rong value for pods", -1, 0, - defaultResyncPeriod, "", "", podModeVerified, @@ -375,7 +283,6 @@ func TestKubernetesParse(t *testing.T) { "rong argument count", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -389,7 +296,6 @@ kubernetes cluster.local`, "this plugin", -1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -403,7 +309,6 @@ kubernetes cluster.local`, "Wrong argument count or unexpected line ending after", -1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -417,7 +322,6 @@ kubernetes cluster.local`, "Wrong argument count or unexpected line ending after", -1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -431,7 +335,6 @@ kubernetes cluster.local`, "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -480,12 +383,6 @@ kubernetes cluster.local`, t.Errorf("Test %d: Expected kubernetes controller to be initialized with %d namespaces. Instead found %d namespaces: '%v' for input '%s'", i, test.expectedNSCount, foundNSCount, k8sController.Namespaces, test.input) } - // ResyncPeriod - foundResyncPeriod := k8sController.opts.resyncPeriod - if foundResyncPeriod != test.expectedResyncPeriod { - t.Errorf("Test %d: Expected kubernetes controller to be initialized with resync period '%s'. Instead found period '%s' for input '%s'", i, test.expectedResyncPeriod, foundResyncPeriod, test.input) - } - // Labels if k8sController.opts.labelSelector != nil { foundLabelSelectorString := meta.FormatLabelSelector(k8sController.opts.labelSelector) diff --git a/plugin/kubernetes/xfr.go b/plugin/kubernetes/xfr.go index 7ea99fd8daa..7759c5a47f8 100644 --- a/plugin/kubernetes/xfr.go +++ b/plugin/kubernetes/xfr.go @@ -110,14 +110,15 @@ func (k *Kubernetes) transfer(c chan dns.RR, zone string) { case api.ServiceTypeClusterIP, api.ServiceTypeNodePort, api.ServiceTypeLoadBalancer: clusterIP := net.ParseIP(svc.ClusterIP) if clusterIP != nil { - for _, p := range svc.Ports { + s := msg.Service{Host: svc.ClusterIP, TTL: k.ttl} + s.Key = strings.Join(svcBase, "/") - s := msg.Service{Host: svc.ClusterIP, Port: int(p.Port), TTL: k.ttl} - s.Key = strings.Join(svcBase, "/") + // Change host from IP to Name for SRV records + host := emitAddressRecord(c, s) - // Change host from IP to Name for SRV records - host := emitAddressRecord(c, s) - s.Host = host + for _, p := range svc.Ports { + s := msg.Service{Host: host, Port: int(p.Port), TTL: k.ttl} + s.Key = strings.Join(svcBase, "/") // Need to generate this to handle use cases for peer-finder // ref: https://github.com/coredns/coredns/pull/823 @@ -183,7 +184,6 @@ func (k *Kubernetes) transfer(c chan dns.RR, zone string) { } } } - return } // emitAddressRecord generates a new A or AAAA record based on the msg.Service and writes it to diff --git a/plugin/loadbalance/OWNERS b/plugin/loadbalance/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/loadbalance/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/loadbalance/README.md b/plugin/loadbalance/README.md index b29ba4c08ec..81a6580c40f 100644 --- a/plugin/loadbalance/README.md +++ b/plugin/loadbalance/README.md @@ -2,14 +2,14 @@ ## Name -*loadbalance* 
- randomize the order of A, AAAA and MX records. +*loadbalance* - randomizes the order of A, AAAA and MX records. ## Description -The *loadbalance* will act as a round-robin DNS loadbalancer by randomizing the order of A, AAAA, +The *loadbalance* will act as a round-robin DNS load balancer by randomizing the order of A, AAAA, and MX records in the answer. -See [Wikipedia](https://en.wikipedia.org/wiki/Round-robin_DNS) about the pros and cons on this +See [Wikipedia](https://en.wikipedia.org/wiki/Round-robin_DNS) about the pros and cons of this setup. It will take care to sort any CNAMEs before any address records, because some stub resolver implementations (like glibc) are particular about that. @@ -19,7 +19,7 @@ implementations (like glibc) are particular about that. loadbalance [POLICY] ~~~ -* **POLICY** is how to balance, the default, and only option, is "round_robin". +* **POLICY** is how to balance. The default, and only option, is "round_robin". ## Examples diff --git a/plugin/loadbalance/handler.go b/plugin/loadbalance/handler.go index 4ec79c098f8..ac046c8d073 100644 --- a/plugin/loadbalance/handler.go +++ b/plugin/loadbalance/handler.go @@ -1,4 +1,4 @@ -// Package loadbalance is plugin for rewriting responses to do "load balancing" +// Package loadbalance is a plugin for rewriting responses to do "load balancing" package loadbalance import ( @@ -9,7 +9,7 @@ import ( "github.com/miekg/dns" ) -// RoundRobin is plugin to rewrite responses for "load balancing". +// RoundRobin is a plugin to rewrite responses for "load balancing". type RoundRobin struct { Next plugin.Handler } diff --git a/plugin/loadbalance/setup.go b/plugin/loadbalance/setup.go index c31fc15e161..4c5a6bfd8f8 100644 --- a/plugin/loadbalance/setup.go +++ b/plugin/loadbalance/setup.go @@ -12,12 +12,7 @@ import ( var log = clog.NewWithPlugin("loadbalance") -func init() { - caddy.RegisterPlugin("loadbalance", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("loadbalance", setup) } func setup(c *caddy.Controller) error { err := parse(c) diff --git a/plugin/log/OWNERS b/plugin/log/OWNERS deleted file mode 100644 index 06032ae2e09..00000000000 --- a/plugin/log/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - nchrisdk -approvers: - - miekg - - nchrisdk diff --git a/plugin/log/README.md b/plugin/log/README.md index bbd853bd31f..8b397bffa24 100644 --- a/plugin/log/README.md +++ b/plugin/log/README.md @@ -92,7 +92,7 @@ The default Common Log Format is: Each of these logs will be outputted with `log.Infof`, so a typical example looks like this: ~~~ txt -2018-10-30T19:10:07.547Z [INFO] [::1]:50759 - 29008 "A IN example.org. udp 41 false 4096" NOERROR qr,rd,ra,ad 68 0.037990251s +[INFO] [::1]:50759 - 29008 "A IN example.org. udp 41 false 4096" NOERROR qr,rd,ra,ad 68 0.037990251s ~~~~ ## Examples diff --git a/plugin/log/log.go b/plugin/log/log.go index 49581dfc49e..7e62011387a 100644 --- a/plugin/log/log.go +++ b/plugin/log/log.go @@ -26,20 +26,24 @@ type Logger struct { // ServeDNS implements the plugin.Handler interface. 
func (l Logger) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { state := request.Request{W: w, Req: r} + name := state.Name() for _, rule := range l.Rules { - if !plugin.Name(rule.NameScope).Matches(state.Name()) { + if !plugin.Name(rule.NameScope).Matches(name) { continue } rrw := dnstest.NewRecorder(w) rc, err := plugin.NextOrFailure(l.Name(), l.Next, ctx, rrw, r) - tpe, _ := response.Typify(rrw.Msg, time.Now().UTC()) - class := response.Classify(tpe) // If we don't set up a class in config, the default "all" will be added // and we shouldn't have an empty rule.Class. _, ok := rule.Class[response.All] - _, ok1 := rule.Class[class] + var ok1 bool + if !ok { + tpe, _ := response.Typify(rrw.Msg, time.Now().UTC()) + class := response.Classify(tpe) + _, ok1 = rule.Class[class] + } if ok || ok1 { logstr := l.repl.Replace(ctx, state, rrw, rule.Format) clog.Infof(logstr) diff --git a/plugin/log/log_test.go b/plugin/log/log_test.go index e7f29fff12e..16efb2026f2 100644 --- a/plugin/log/log_test.go +++ b/plugin/log/log_test.go @@ -3,6 +3,7 @@ package log import ( "bytes" "context" + "io/ioutil" "log" "strings" "testing" @@ -239,8 +240,7 @@ func TestLogged(t *testing.T) { } func BenchmarkLogged(b *testing.B) { - var f bytes.Buffer - log.SetOutput(&f) + log.SetOutput(ioutil.Discard) rule := Rule{ NameScope: ".", diff --git a/plugin/log/setup.go b/plugin/log/setup.go index 2e89d1f8108..f9441bf9259 100644 --- a/plugin/log/setup.go +++ b/plugin/log/setup.go @@ -12,12 +12,7 @@ import ( "github.com/miekg/dns" ) -func init() { - caddy.RegisterPlugin("log", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("log", setup) } func setup(c *caddy.Controller) error { rules, err := logParse(c) diff --git a/plugin/loop/OWNERS b/plugin/loop/OWNERS deleted file mode 100644 index 3a4ef23a1f6..00000000000 --- a/plugin/loop/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - chrisohaver -approvers: - - miekg - - chrisohaver diff --git a/plugin/loop/README.md b/plugin/loop/README.md index eb3a1ee90fc..b0fb4835d4c 100644 --- a/plugin/loop/README.md +++ b/plugin/loop/README.md @@ -2,7 +2,7 @@ ## Name -*loop* - detect simple forwarding loops and halt the server. +*loop* - detects simple forwarding loops and halts the server. ## Description diff --git a/plugin/loop/setup.go b/plugin/loop/setup.go index 30f1a25279f..3cf4861ac76 100644 --- a/plugin/loop/setup.go +++ b/plugin/loop/setup.go @@ -13,12 +13,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("loop", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("loop", setup) } func setup(c *caddy.Controller) error { l, err := parse(c) diff --git a/plugin/metadata/OWNERS b/plugin/metadata/OWNERS deleted file mode 100644 index 6d13ad9f14f..00000000000 --- a/plugin/metadata/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - ekleiner - - miekg -approvers: - - ekleiner - - miekg diff --git a/plugin/metadata/README.md b/plugin/metadata/README.md index c69650927d8..9a08bb91a1c 100644 --- a/plugin/metadata/README.md +++ b/plugin/metadata/README.md @@ -2,25 +2,25 @@ ## Name -*metadata* - enable a meta data collector. +*metadata* - enables a metadata collector. 
## Description By enabling *metadata* any plugin that implements [metadata.Provider interface](https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider) will be called for -each DNS query, at beginning of the process for that query, in order to add it's own meta data to +each DNS query, at the beginning of the process for that query, in order to add its own metadata to context. -The meta data collected will be available for all plugins, via the Context parameter provided in the +The metadata collected will be available for all plugins, via the Context parameter provided in the ServeDNS function. The package (code) documentation has examples on how to inspect and retrieve metadata a plugin might be interested in. -The meta data is added by setting a label with a value in the context. These labels should be named +The metadata is added by setting a label with a value in the context. These labels should be named `plugin/NAME`, where **NAME** is something descriptive. The only hard requirement the *metadata* -plugin enforces is that the labels contains a slash. See the documentation for +plugin enforces is that the labels contain a slash. See the documentation for `metadata.SetValueFunc`. -The value stored is a string. The empty string signals "no meta data". See the documentation for +The value stored is a string. The empty string signals "no metadata". See the documentation for `metadata.ValueFunc` on how to retrieve this. ## Syntax diff --git a/plugin/metadata/setup.go b/plugin/metadata/setup.go index 665216ea8f6..734ea678ee2 100644 --- a/plugin/metadata/setup.go +++ b/plugin/metadata/setup.go @@ -7,12 +7,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("metadata", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("metadata", setup) } func setup(c *caddy.Controller) error { m, err := metadataParse(c) diff --git a/plugin/metrics/OWNERS b/plugin/metrics/OWNERS deleted file mode 100644 index 1b071c69eeb..00000000000 --- a/plugin/metrics/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -reviewers: - - fastest963 - - miekg - - superq - - greenpau -approvers: - - fastest963 - - miekg - - superq diff --git a/plugin/metrics/metrics.go b/plugin/metrics/metrics.go index 2b165e5b902..ddadd7acace 100644 --- a/plugin/metrics/metrics.go +++ b/plugin/metrics/metrics.go @@ -10,6 +10,7 @@ import ( "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/metrics/vars" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -95,7 +96,7 @@ func (m *Metrics) ZoneNames() []string { // OnStartup sets up the metrics on startup. 
func (m *Metrics) OnStartup() error {
-	ln, err := net.Listen("tcp", m.Addr)
+	ln, err := reuseport.Listen("tcp", m.Addr)
 	if err != nil {
 		log.Errorf("Failed to start metrics handler: %s", err)
 		return err
diff --git a/plugin/metrics/setup.go b/plugin/metrics/setup.go
index 744db7cd3b2..719362609a5 100644
--- a/plugin/metrics/setup.go
+++ b/plugin/metrics/setup.go
@@ -20,12 +20,7 @@ var (
 	registry = newReg()
 )
-func init() {
-	caddy.RegisterPlugin("prometheus", caddy.Plugin{
-		ServerType: "dns",
-		Action:     setup,
-	})
-}
+func init() { plugin.Register("prometheus", setup) }
 func setup(c *caddy.Controller) error {
 	m, err := parse(c)
diff --git a/plugin/normalize.go b/plugin/normalize.go
index 6402bf8d905..dea7d6723a5 100644
--- a/plugin/normalize.go
+++ b/plugin/normalize.go
@@ -12,7 +12,7 @@ import (
 // See core/dnsserver/address.go - we should unify these two impls.
-// Zones respresents a lists of zone names.
+// Zones represents a list of zone names.
 type Zones []string
 // Matches checks if qname is a subdomain of any of the zones in z. The match
@@ -61,13 +61,26 @@ type (
 // Normalize will return the host portion of host, stripping
 // of any port or transport. The host will also be fully qualified and lowercased.
+// An empty string is returned on failure.
 func (h Host) Normalize() string {
+	// The error can be ignored here, because this function should only be called after the corefile has already been vetted.
+	host, _ := h.MustNormalize()
+	return host
+}
+
+// MustNormalize will return the host portion of host, stripping
+// off any port or transport. The host will also be fully qualified and lowercased.
+// An error is returned on failure.
+func (h Host) MustNormalize() (string, error) {
 	s := string(h)
 	_, s = parse.Transport(s)
-	// The error can be ignore here, because this function is called after the corefile has already been vetted.
-	host, _, _, _ := SplitHostPort(s)
-	return Name(host).Normalize()
+	// The error can be ignored here, because this function is called after the corefile has already been vetted.
+	host, _, _, err := SplitHostPort(s)
+	if err != nil {
+		return "", err
+	}
+	return Name(host).Normalize(), nil
 }
 // SplitHostPort splits s up in a host and port portion, taking reverse address notation into account.
@@ -76,7 +89,7 @@ func (h Host) Normalize() string {
 	// If there is: :[0-9]+ on the end we assume this is the port. This works for (ascii) domain
 	// names and our reverse syntax, which always needs a /mask *before* the port.
-	// So from the back, find first colon, and then check if its a number.
+	// So from the back, find first colon, and then check if it's a number.
host = s colon := strings.LastIndex(s, ":") diff --git a/plugin/normalize_test.go b/plugin/normalize_test.go index 315aaf5d9fe..2a82271bae6 100644 --- a/plugin/normalize_test.go +++ b/plugin/normalize_test.go @@ -83,6 +83,17 @@ func TestHostNormalize(t *testing.T) { } } +func TestHostMustNormalizeFail(t *testing.T) { + hosts := []string{"..:53", "::", ""} + for i := 0; i < len(hosts); i++ { + ts := hosts[i] + h, err := Host(ts).MustNormalize() + if err == nil { + t.Errorf("Expected error, got %v", h) + } + } +} + func TestSplitHostPortReverse(t *testing.T) { tests := map[string]int{ "example.org.": 0, diff --git a/plugin/nsid/OWNERS b/plugin/nsid/OWNERS deleted file mode 100644 index 4e0ca6d99d2..00000000000 --- a/plugin/nsid/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - yongtang -approvers: - - yongtang diff --git a/plugin/nsid/README.md b/plugin/nsid/README.md index 0ff5cd764ee..c50acbf59b5 100644 --- a/plugin/nsid/README.md +++ b/plugin/nsid/README.md @@ -28,7 +28,7 @@ If **DATA** is not given, the host's name is used. Enable nsid: ~~~ corefile -. { +example.org { whoami nsid Use The Force } diff --git a/plugin/nsid/setup.go b/plugin/nsid/setup.go index c851c7d20f1..3fa0edd8547 100644 --- a/plugin/nsid/setup.go +++ b/plugin/nsid/setup.go @@ -10,12 +10,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("nsid", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("nsid", setup) } func setup(c *caddy.Controller) error { nsid, err := nsidParse(c) diff --git a/plugin/pkg/cache/cache.go b/plugin/pkg/cache/cache.go index 8a5ad783e05..3a2c8ff7ffa 100644 --- a/plugin/pkg/cache/cache.go +++ b/plugin/pkg/cache/cache.go @@ -76,12 +76,15 @@ func newShard(size int) *shard { return &shard{items: make(map[uint64]interface{ // Add adds element indexed by key into the cache. Any existing element is overwritten func (s *shard) Add(key uint64, el interface{}) { - l := s.Len() - if l+1 > s.size { - s.Evict() - } - s.Lock() + if len(s.items) >= s.size { + if _, ok := s.items[key]; !ok { + for k := range s.items { + delete(s.items, k) + break + } + } + } s.items[key] = el s.Unlock() } @@ -95,24 +98,12 @@ func (s *shard) Remove(key uint64) { // Evict removes a random element from the cache. func (s *shard) Evict() { - hasKey := false - var key uint64 - - s.RLock() + s.Lock() for k := range s.items { - key = k - hasKey = true + delete(s.items, k) break } - s.RUnlock() - - if !hasKey { - // empty cache - return - } - - // If this item is gone between the RUnlock and Lock race we don't care. - s.Remove(key) + s.Unlock() } // Get looks up the element indexed under key. 
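The rewritten `Add` above merges the membership check, the eviction, and the store into one critical section: an arbitrary entry is evicted only when the shard is full and the key is genuinely new, so overwriting an existing key never shrinks the shard, and the check-then-delete race of the old `Evict` disappears. A small hedged sketch of the same policy, using a stand-in type rather than the real shard:

```go
package main

import (
	"fmt"
	"sync"
)

// miniShard is an illustrative stand-in for the cache shard: a bounded map
// that evicts one arbitrary entry to make room, but never evicts on overwrite.
type miniShard struct {
	sync.Mutex
	size  int
	items map[uint64]interface{}
}

func (s *miniShard) Add(key uint64, el interface{}) {
	s.Lock()
	defer s.Unlock()
	if len(s.items) >= s.size {
		if _, ok := s.items[key]; !ok {
			// Map iteration order is unspecified, so this deletes a "random" victim.
			for k := range s.items {
				delete(s.items, k)
				break
			}
		}
	}
	s.items[key] = el
}

func main() {
	s := &miniShard{size: 2, items: make(map[uint64]interface{})}
	s.Add(1, "a")
	s.Add(2, "b")
	s.Add(2, "b2") // overwrite: no eviction, length stays at 2
	fmt.Println(len(s.items))
}
```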
diff --git a/plugin/pkg/cache/cache_test.go b/plugin/pkg/cache/cache_test.go
index 0c56bb9b331..2714967a661 100644
--- a/plugin/pkg/cache/cache_test.go
+++ b/plugin/pkg/cache/cache_test.go
@@ -3,12 +3,23 @@ package cache
 import "testing"
 func TestCacheAddAndGet(t *testing.T) {
-	c := New(4)
+	const N = shardSize * 4
+	c := New(N)
 	c.Add(1, 1)
 	if _, found := c.Get(1); !found {
 		t.Fatal("Failed to find inserted record")
 	}
+
+	for i := 0; i < N; i++ {
+		c.Add(uint64(i), 1)
+	}
+	for i := 0; i < N; i++ {
+		c.Add(uint64(i), 1)
+		if c.Len() != N {
+			t.Fatal("An item was unnecessarily evicted from the cache")
+		}
+	}
 }
 func TestCacheLen(t *testing.T) {
@@ -30,6 +41,18 @@ func TestCacheLen(t *testing.T) {
 	}
 }
+func TestCacheSharding(t *testing.T) {
+	c := New(shardSize)
+	for i := 0; i < shardSize*2; i++ {
+		c.Add(uint64(i), 1)
+	}
+	for i, s := range c.shards {
+		if s.Len() == 0 {
+			t.Errorf("Failed to populate shard: %d", i)
+		}
+	}
+}
+
 func BenchmarkCache(b *testing.B) {
 	b.ReportAllocs()
diff --git a/plugin/pkg/cache/shard_test.go b/plugin/pkg/cache/shard_test.go
index 26675cee122..a3831305db6 100644
--- a/plugin/pkg/cache/shard_test.go
+++ b/plugin/pkg/cache/shard_test.go
@@ -1,14 +1,40 @@
 package cache
-import "testing"
+import (
+	"sync"
+	"testing"
+)
 func TestShardAddAndGet(t *testing.T) {
-	s := newShard(4)
+	s := newShard(1)
 	s.Add(1, 1)
 	if _, found := s.Get(1); !found {
 		t.Fatal("Failed to find inserted record")
 	}
+
+	s.Add(2, 1)
+	if _, found := s.Get(1); found {
+		t.Fatal("Failed to evict record")
+	}
+	if _, found := s.Get(2); !found {
+		t.Fatal("Failed to find inserted record")
+	}
+}
+
+func TestAddEvict(t *testing.T) {
+	const size = 1024
+	s := newShard(size)
+
+	for i := uint64(0); i < size; i++ {
+		s.Add(i, 1)
+	}
+	for i := uint64(0); i < size; i++ {
+		s.Add(i, 1)
+		if s.Len() != size {
+			t.Fatal("An item was unnecessarily evicted from the cache")
+		}
+	}
 }
 func TestShardLen(t *testing.T) {
@@ -57,4 +83,57 @@ func TestShardLenEvict(t *testing.T) {
 	if l := s.Len(); l != 4 {
 		t.Fatalf("Shard size should %d, got %d", 4, l)
 	}
+
+	// Make sure we don't accidentally evict an element when
+	// the key is already stored.
+	for i := 0; i < 4; i++ {
+		s.Add(5, 1)
+		if l := s.Len(); l != 4 {
+			t.Fatalf("Shard size should %d, got %d", 4, l)
+		}
+	}
+}
+
+func TestShardEvictParallel(t *testing.T) {
+	s := newShard(shardSize)
+	for i := uint64(0); i < shardSize; i++ {
+		s.Add(i, struct{}{})
+	}
+	start := make(chan struct{})
+	var wg sync.WaitGroup
+	for i := 0; i < shardSize; i++ {
+		wg.Add(1)
+		go func() {
+			<-start
+			s.Evict()
+			wg.Done()
+		}()
+	}
+	close(start) // start evicting in parallel
+	wg.Wait()
+	if s.Len() != 0 {
+		t.Fatalf("Failed to evict all keys in parallel: %d", s.Len())
+	}
+}
+
+func BenchmarkShard(b *testing.B) {
+	s := newShard(shardSize)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		k := uint64(i) % shardSize * 2
+		s.Add(k, 1)
+		s.Get(k)
+	}
+}
+
+func BenchmarkShardParallel(b *testing.B) {
+	s := newShard(shardSize)
+	b.ResetTimer()
+	b.RunParallel(func(pb *testing.PB) {
+		for i := uint64(0); pb.Next(); i++ {
+			k := i % shardSize * 2
+			s.Add(k, 1)
+			s.Get(k)
+		}
+	})
}
diff --git a/plugin/pkg/dnstest/server.go b/plugin/pkg/dnstest/server.go
index bbad0f96bfa..94c390623fa 100644
--- a/plugin/pkg/dnstest/server.go
+++ b/plugin/pkg/dnstest/server.go
@@ -3,6 +3,8 @@ package dnstest
 import (
 	"net"
+	"github.com/coredns/coredns/plugin/pkg/reuseport"
+
 	"github.com/miekg/dns"
 )
@@ -27,7 +29,7 @@ func NewServer(f dns.HandlerFunc) *Server {
 	s2 := &dns.Server{} // tcp
 	for i := 0; i < 5; i++ { // 5 attempts
-		s2.Listener, _ = net.Listen("tcp", ":0")
+		s2.Listener, _ = reuseport.Listen("tcp", ":0")
 		if s2.Listener == nil {
 			continue
 		}
diff --git a/plugin/pkg/edns/edns.go b/plugin/pkg/edns/edns.go
index 68fb038657c..31f57ea9b89 100644
--- a/plugin/pkg/edns/edns.go
+++ b/plugin/pkg/edns/edns.go
@@ -63,7 +63,7 @@ func Version(req *dns.Msg) (*dns.Msg, error) {
 }
 // Size returns a normalized size based on proto.
-func Size(proto string, size int) int {
+func Size(proto string, size uint16) uint16 {
 	if proto == "tcp" {
 		return dns.MaxMsgSize
 	}
diff --git a/plugin/pkg/fall/fall.go b/plugin/pkg/fall/fall.go
index c8cdc6ff68c..067deb9a617 100644
--- a/plugin/pkg/fall/fall.go
+++ b/plugin/pkg/fall/fall.go
@@ -1,4 +1,16 @@
-// Package fall handles the fallthrough logic used in plugins that support it.
+// Package fall handles the fallthrough logic used in plugins that support it. Be careful when including this
+// functionality in your plugin. Why? In the DNS only one source is authoritative for a set of names. Fallthrough
+// breaks this convention by allowing a plugin to query multiple sources, depending on the replies it got so far.
+//
+// This may cause issues in downstream caches, where different answers for the same query can potentially confuse clients.
+// On the other hand this is a powerful feature that can aid in migration or other edge cases.
+//
+// The takeaway: be mindful of this and don't blindly assume it's a good feature to have in your plugin.
+//
+// See http://github.com/coredns/coredns/issues/2723 for some discussion on this, which includes this quote:
+//
+// TL;DR: `fallthrough` is indeed risky and hackish, but still a good feature of CoreDNS as it allows to quickly answer boring edge cases.
+//
 package fall
 import (
diff --git a/plugin/pkg/fuzz/do.go b/plugin/pkg/fuzz/do.go
index dfb046b25bc..054c4298ab9 100644
--- a/plugin/pkg/fuzz/do.go
+++ b/plugin/pkg/fuzz/do.go
@@ -10,18 +10,22 @@ import (
 	"github.com/miekg/dns"
 )
-// Do will fuzz p - used by gofuzz. See Maefile.fuzz for comments and context.
+// Do will fuzz p - used by gofuzz.
See Makefile.fuzz for comments and context.
 func Do(p plugin.Handler, data []byte) int {
 	ctx := context.TODO()
-	ret := 1
 	r := new(dns.Msg)
 	if err := r.Unpack(data); err != nil {
-		ret = 0
+		return 0 // plugin will never be called when this happens.
+	}
+	// If the data unpacks into a dns msg but does not have a proper question section, discard it.
+	// The server parts make sure this is true before calling the plugins; mimic this behavior.
+	if len(r.Question) == 0 {
+		return 0
 	}
 	if _, err := p.ServeDNS(ctx, &test.ResponseWriter{}, r); err != nil {
-		ret = 1
+		return 1
 	}
-	return ret
+	return 0
 }
diff --git a/plugin/pkg/log/log.go b/plugin/pkg/log/log.go
index 3050999afe4..6f9dd07c68d 100644
--- a/plugin/pkg/log/log.go
+++ b/plugin/pkg/log/log.go
@@ -1,7 +1,7 @@
-// Package log implements a small wrapper around the std lib log package.
-// It implements log levels by prefixing the logs with the current time
-// with in RFC3339Milli and [INFO], [DEBUG], [WARNING] or [ERROR].
-// Debug logging is available and enabled if the *debug* plugin is used.
+// Package log implements a small wrapper around the std lib log package. It
+// implements log levels by prefixing the logs with [INFO], [DEBUG], [WARNING]
+// or [ERROR]. Debug logging is available and enabled if the *debug* plugin is
+// used.
 //
 // log.Info("this is some logging"), will log on the Info level.
 //
@@ -14,7 +14,6 @@ import (
 	golog "log"
 	"os"
 	"sync"
-	"time"
 )
 // D controls whether we should output debug logs. If true, we do, once set
@@ -26,14 +25,21 @@ type d struct {
 	sync.RWMutex
 }
-// Set sets d to true.
+// Set enables debug logging.
 func (d *d) Set() {
 	d.Lock()
 	d.on = true
 	d.Unlock()
 }
-// Value return the boolean value of d.
+// Clear disables debug logging.
+func (d *d) Clear() {
+	d.Lock()
+	d.on = false
+	d.Unlock()
+}
+
+// Value returns whether debug logging is enabled.
 func (d *d) Value() bool {
 	d.RLock()
 	b := d.on
@@ -41,17 +47,14 @@ func (d *d) Value() bool {
 	return b
 }
-// RFC3339Milli doesn't exist, invent it here.
-func clock() string { return time.Now().Format("2006-01-02T15:04:05.000Z07:00") }
-
 // logf calls log.Printf prefixed with level.
 func logf(level, format string, v ...interface{}) {
-	golog.Print(clock(), level, fmt.Sprintf(format, v...))
+	golog.Print(level, fmt.Sprintf(format, v...))
 }
 // log calls log.Print prefixed with level.
 func log(level string, v ...interface{}) {
-	golog.Print(clock(), level, fmt.Sprint(v...))
+	golog.Print(level, fmt.Sprint(v...))
 }
 // Debug is equivalent to log.Print(), but prefixed with "[DEBUG] ".
It only outputs something @@ -102,9 +105,9 @@ func Fatalf(format string, v ...interface{}) { logf(fatal, format, v...); os.Exi func Discard() { golog.SetOutput(ioutil.Discard) } const ( - debug = " [DEBUG] " - err = " [ERROR] " - fatal = " [FATAL] " - info = " [INFO] " - warning = " [WARNING] " + debug = "[DEBUG] " + err = "[ERROR] " + fatal = "[FATAL] " + info = "[INFO] " + warning = "[WARNING] " ) diff --git a/plugin/pkg/log/log_test.go b/plugin/pkg/log/log_test.go index 4f0dc4f4318..32c1d39ad15 100644 --- a/plugin/pkg/log/log_test.go +++ b/plugin/pkg/log/log_test.go @@ -23,6 +23,13 @@ func TestDebug(t *testing.T) { if x := f.String(); !strings.Contains(x, debug+"debug") { t.Errorf("Expected debug log to be %s, got %s", debug+"debug", x) } + f.Reset() + + D.Clear() + Debug("debug") + if x := f.String(); x != "" { + t.Errorf("Expected no debug logs, got %s", x) + } } func TestDebugx(t *testing.T) { diff --git a/plugin/pkg/log/plugin_test.go b/plugin/pkg/log/plugin_test.go index c023386088c..b24caa48b09 100644 --- a/plugin/pkg/log/plugin_test.go +++ b/plugin/pkg/log/plugin_test.go @@ -19,19 +19,3 @@ func TestPlugins(t *testing.T) { t.Errorf("Expected log to be %s, got %s", info+ts, x) } } - -func TestPluginsDateTime(t *testing.T) { - var f bytes.Buffer - const ts = "test" - golog.SetFlags(0) // Set to 0 because we're doing our own time, with timezone - golog.SetOutput(&f) - - lg := NewWithPlugin("testplugin") - - lg.Info(ts) - // rude check if the date/time is there - str := f.String() - if str[4] != '-' || str[7] != '-' || str[10] != 'T' { - t.Errorf("Expected date got %s...", str[:15]) - } -} diff --git a/plugin/pkg/parse/host.go b/plugin/pkg/parse/host.go index 87177125fb1..c1b7d23e0fb 100644 --- a/plugin/pkg/parse/host.go +++ b/plugin/pkg/parse/host.go @@ -4,12 +4,23 @@ import ( "fmt" "net" "os" + "strings" "github.com/coredns/coredns/plugin/pkg/transport" "github.com/miekg/dns" ) +// Strips the zone, but preserves any port that comes after the zone +func stripZone(host string) string { + if strings.Contains(host, "%") { + lastPercent := strings.LastIndex(host, "%") + newHost := host[:lastPercent] + return newHost + } + return host +} + // HostPortOrFile parses the strings in s, each string can either be a // address, [scheme://]address:port or a filename. The address part is checked // and in case of filename a resolv.conf like file is (assumed) and parsed and @@ -21,10 +32,11 @@ func HostPortOrFile(s ...string) ([]string, error) { trans, host := Transport(h) addr, _, err := net.SplitHostPort(host) + if err != nil { // Parse didn't work, it is not a addr:port combo - if net.ParseIP(host) == nil { - // Not an IP address. + hostNoZone := stripZone(host) + if net.ParseIP(hostNoZone) == nil { ss, err := tryFile(host) if err == nil { servers = append(servers, ss...) @@ -47,8 +59,7 @@ func HostPortOrFile(s ...string) ([]string, error) { continue } - if net.ParseIP(addr) == nil { - // Not an IP address. + if net.ParseIP(stripZone(addr)) == nil { ss, err := tryFile(host) if err == nil { servers = append(servers, ss...) 
diff --git a/plugin/pkg/parse/host_test.go b/plugin/pkg/parse/host_test.go index f6e771f29c3..1c23c5beed1 100644 --- a/plugin/pkg/parse/host_test.go +++ b/plugin/pkg/parse/host_test.go @@ -34,6 +34,26 @@ func TestHostPortOrFile(t *testing.T) { "127.0.0.1:53", false, }, + { + "fe80::1", + "[fe80::1]:53", + false, + }, + { + "fe80::1%ens3", + "[fe80::1%ens3]:53", + false, + }, + { + "[fd01::1]:153", + "[fd01::1]:153", + false, + }, + { + "[fd01::1%ens3]:153", + "[fd01::1%ens3]:153", + false, + }, } err := ioutil.WriteFile("resolv.conf", []byte("nameserver 127.0.0.1\n"), 0600) diff --git a/plugin/pkg/replacer/replacer.go b/plugin/pkg/replacer/replacer.go index 3e2cbfcbd61..81261ca799a 100644 --- a/plugin/pkg/replacer/replacer.go +++ b/plugin/pkg/replacer/replacer.go @@ -4,6 +4,7 @@ import ( "context" "strconv" "strings" + "sync" "time" "github.com/coredns/coredns/plugin/metadata" @@ -14,195 +15,262 @@ import ( ) // Replacer replaces labels for values in strings. -type Replacer struct { - valueFunc func(request.Request, *dnstest.Recorder, string) string - labels []string +type Replacer struct{} + +// New makes a new replacer. This only needs to be called once in the setup and +// then call Replace for each incoming message. A replacer is safe for concurrent use. +func New() Replacer { + return Replacer{} +} + +// Replace performs a replacement of values on s and returns the string with the replaced values. +func (r Replacer) Replace(ctx context.Context, state request.Request, rr *dnstest.Recorder, s string) string { + return loadFormat(s).Replace(ctx, state, rr) } +const ( + headerReplacer = "{>" + // EmptyValue is the default empty value. + EmptyValue = "-" +) + // labels are all supported labels that can be used in the default Replacer. -var labels = []string{ - "{type}", - "{name}", - "{class}", - "{proto}", - "{size}", - "{remote}", - "{port}", - "{local}", +var labels = map[string]struct{}{ + "{type}": {}, + "{name}": {}, + "{class}": {}, + "{proto}": {}, + "{size}": {}, + "{remote}": {}, + "{port}": {}, + "{local}": {}, // Header values. - headerReplacer + "id}", - headerReplacer + "opcode}", - headerReplacer + "do}", - headerReplacer + "bufsize}", + headerReplacer + "id}": {}, + headerReplacer + "opcode}": {}, + headerReplacer + "do}": {}, + headerReplacer + "bufsize}": {}, // Recorded replacements. - "{rcode}", - "{rsize}", - "{duration}", - headerReplacer + "rflags}", + "{rcode}": {}, + "{rsize}": {}, + "{duration}": {}, + headerReplacer + "rflags}": {}, } -// value returns the current value of label. -func value(state request.Request, rr *dnstest.Recorder, label string) string { +// appendValue appends the current value of label. +func appendValue(b []byte, state request.Request, rr *dnstest.Recorder, label string) []byte { switch label { case "{type}": - return state.Type() + return append(b, state.Type()...) case "{name}": - return state.Name() + return append(b, state.Name()...) case "{class}": - return state.Class() + return append(b, state.Class()...) case "{proto}": - return state.Proto() + return append(b, state.Proto()...) case "{size}": - return strconv.Itoa(state.Req.Len()) + return strconv.AppendInt(b, int64(state.Req.Len()), 10) case "{remote}": - return addrToRFC3986(state.IP()) + return appendAddrToRFC3986(b, state.IP()) case "{port}": - return state.Port() + return append(b, state.Port()...) case "{local}": - return addrToRFC3986(state.LocalIP()) + return appendAddrToRFC3986(b, state.LocalIP()) // Header placeholders (case-insensitive). 
case headerReplacer + "id}": - return strconv.Itoa(int(state.Req.Id)) + return strconv.AppendInt(b, int64(state.Req.Id), 10) case headerReplacer + "opcode}": - return strconv.Itoa(state.Req.Opcode) + return strconv.AppendInt(b, int64(state.Req.Opcode), 10) case headerReplacer + "do}": - return boolToString(state.Do()) + return strconv.AppendBool(b, state.Do()) case headerReplacer + "bufsize}": - return strconv.Itoa(state.Size()) + return strconv.AppendInt(b, int64(state.Size()), 10) // Recorded replacements. case "{rcode}": if rr == nil { - return EmptyValue + return append(b, EmptyValue...) } - rcode := dns.RcodeToString[rr.Rcode] - if rcode == "" { - rcode = strconv.Itoa(rr.Rcode) + if rcode := dns.RcodeToString[rr.Rcode]; rcode != "" { + return append(b, rcode...) } - return rcode + return strconv.AppendInt(b, int64(rr.Rcode), 10) case "{rsize}": if rr == nil { - return EmptyValue + return append(b, EmptyValue...) } - return strconv.Itoa(rr.Len) + return strconv.AppendInt(b, int64(rr.Len), 10) case "{duration}": if rr == nil { - return EmptyValue + return append(b, EmptyValue...) } - return strconv.FormatFloat(time.Since(rr.Start).Seconds(), 'f', -1, 64) + "s" + secs := time.Since(rr.Start).Seconds() + return append(strconv.AppendFloat(b, secs, 'f', -1, 64), 's') case headerReplacer + "rflags}": if rr != nil && rr.Msg != nil { - return flagsToString(rr.Msg.MsgHdr) - } - return EmptyValue - } - return EmptyValue -} - -// New makes a new replacer. This only needs to be called once in the setup and then call Replace for each incoming message. -// A replacer is safe for concurrent use. -func New() Replacer { - return Replacer{ - valueFunc: value, - labels: labels, - } -} - -// Replace performs a replacement of values on s and returns the string with the replaced values. -func (r Replacer) Replace(ctx context.Context, state request.Request, rr *dnstest.Recorder, s string) string { - for _, placeholder := range r.labels { - if strings.Contains(s, placeholder) { - s = strings.Replace(s, placeholder, r.valueFunc(state, rr, placeholder), -1) - } - } - - // Metadata label replacements. Scan for {/ and search for next }, replace that metadata label with - // any meta data that is available. - b := strings.Builder{} - for strings.Contains(s, labelReplacer) { - idxStart := strings.Index(s, labelReplacer) - endOffset := idxStart + len(labelReplacer) - idxEnd := strings.Index(s[endOffset:], "}") - if idxEnd > -1 { - label := s[idxStart+2 : endOffset+idxEnd] - - fm := metadata.ValueFunc(ctx, label) - replacement := EmptyValue - if fm != nil { - replacement = fm() - } - - b.WriteString(s[:idxStart]) - b.WriteString(replacement) - s = s[endOffset+idxEnd+1:] - } else { - break + return appendFlags(b, rr.Msg.MsgHdr) } + return append(b, EmptyValue...) + default: + return append(b, EmptyValue...) } - - b.WriteString(s) - return b.String() -} - -func boolToString(b bool) string { - if b { - return "true" - } - return "false" } -// flagsToString checks all header flags and returns those +// appendFlags checks all header flags and appends those // that are set as a string separated with commas -func flagsToString(h dns.MsgHdr) string { - flags := make([]string, 7) - i := 0 - +func appendFlags(b []byte, h dns.MsgHdr) []byte { + origLen := len(b) if h.Response { - flags[i] = "qr" - i++ + b = append(b, "qr,"...) } - if h.Authoritative { - flags[i] = "aa" - i++ + b = append(b, "aa,"...) } if h.Truncated { - flags[i] = "tc" - i++ + b = append(b, "tc,"...) 
} if h.RecursionDesired { - flags[i] = "rd" - i++ + b = append(b, "rd,"...) } if h.RecursionAvailable { - flags[i] = "ra" - i++ + b = append(b, "ra,"...) } if h.Zero { - flags[i] = "z" - i++ + b = append(b, "z,"...) } if h.AuthenticatedData { - flags[i] = "ad" - i++ + b = append(b, "ad,"...) } if h.CheckingDisabled { - flags[i] = "cd" - i++ + b = append(b, "cd,"...) } - return strings.Join(flags[:i], ",") + if n := len(b); n > origLen { + return b[:n-1] // trim trailing ',' + } + return b } -// addrToRFC3986 will add brackets to the address if it is an IPv6 address. -func addrToRFC3986(addr string) string { - if strings.Contains(addr, ":") { - return "[" + addr + "]" +// appendAddrToRFC3986 will add brackets to the address if it is an IPv6 address. +func appendAddrToRFC3986(b []byte, addr string) []byte { + if strings.IndexByte(addr, ':') != -1 { + b = append(b, '[') + b = append(b, addr...) + b = append(b, ']') + } else { + b = append(b, addr...) } - return addr + return b } +type nodeType int + const ( - headerReplacer = "{>" - labelReplacer = "{/" - // EmptyValue is the default empty value. - EmptyValue = "-" + typeLabel nodeType = iota // "{type}" + typeLiteral // "foo" + typeMetadata // "{/metadata}" ) + +// A node represents a segment of a parsed format. For example: "A {type}" +// contains two nodes: "A " (literal); and "{type}" (label). +type node struct { + value string // Literal value, label or metadata label + typ nodeType +} + +// A replacer is an ordered list of all the nodes in a format. +type replacer []node + +func parseFormat(s string) replacer { + // Assume there is a literal between each label - its cheaper to over + // allocate once than allocate twice. + rep := make(replacer, 0, strings.Count(s, "{")*2) + for { + // We find the right bracket then backtrack to find the left bracket. + // This allows us to handle formats like: "{ {foo} }". + j := strings.IndexByte(s, '}') + if j < 0 { + break + } + i := strings.LastIndexByte(s[:j], '{') + if i < 0 { + // Handle: "A } {foo}" by treating "A }" as a literal + rep = append(rep, node{ + value: s[:j+1], + typ: typeLiteral, + }) + s = s[j+1:] + continue + } + + val := s[i : j+1] + var typ nodeType + switch _, ok := labels[val]; { + case ok: + typ = typeLabel + case strings.HasPrefix(val, "{/"): + // Strip "{/}" from metadata labels + val = val[2 : len(val)-1] + typ = typeMetadata + default: + // Given: "A {X}" val is "{X}" expand it to the whole literal. + val = s[:j+1] + typ = typeLiteral + } + + // Append any leading literal. Given "A {type}" the literal is "A " + if i != 0 && typ != typeLiteral { + rep = append(rep, node{ + value: s[:i], + typ: typeLiteral, + }) + } + rep = append(rep, node{ + value: val, + typ: typ, + }) + s = s[j+1:] + } + if len(s) != 0 { + rep = append(rep, node{ + value: s, + typ: typeLiteral, + }) + } + return rep +} + +var replacerCache sync.Map // map[string]replacer + +func loadFormat(s string) replacer { + if v, ok := replacerCache.Load(s); ok { + return v.(replacer) + } + v, _ := replacerCache.LoadOrStore(s, parseFormat(s)) + return v.(replacer) +} + +// bufPool stores pointers to scratch buffers. +var bufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 0, 256) + }, +} + +func (r replacer) Replace(ctx context.Context, state request.Request, rr *dnstest.Recorder) string { + b := bufPool.Get().([]byte) + for _, s := range r { + switch s.typ { + case typeLabel: + b = appendValue(b, state, rr, s.value) + case typeLiteral: + b = append(b, s.value...) 
+ case typeMetadata: + if fm := metadata.ValueFunc(ctx, s.value); fm != nil { + b = append(b, fm()...) + } else { + b = append(b, EmptyValue...) + } + } + } + s := string(b) + bufPool.Put(b[:0]) + return s +} diff --git a/plugin/pkg/replacer/replacer_test.go b/plugin/pkg/replacer/replacer_test.go index 13ba396b974..3e0a5069119 100644 --- a/plugin/pkg/replacer/replacer_test.go +++ b/plugin/pkg/replacer/replacer_test.go @@ -2,6 +2,8 @@ package replacer import ( "context" + "reflect" + "strings" "testing" "github.com/coredns/coredns/plugin/metadata" @@ -12,6 +14,9 @@ import ( "github.com/miekg/dns" ) +// This is the default format used by the log package +const CommonLogFormat = `{remote}:{port} - {>id} "{type} {class} {name} {proto} {size} {>do} {>bufsize}" {rcode} {>rflags} {rsize} {duration}` + func TestReplacer(t *testing.T) { w := dnstest.NewRecorder(&test.ResponseWriter{}) r := new(dns.Msg) @@ -32,6 +37,178 @@ func TestReplacer(t *testing.T) { } } +func TestParseFormat(t *testing.T) { + type formatTest struct { + Format string + Expected replacer + } + tests := []formatTest{ + { + Format: "", + Expected: replacer{}, + }, + { + Format: "A", + Expected: replacer{ + {"A", typeLiteral}, + }, + }, + { + Format: "A {A}", + Expected: replacer{ + {"A {A}", typeLiteral}, + }, + }, + { + Format: "{{remote}}", + Expected: replacer{ + {"{", typeLiteral}, + {"{remote}", typeLabel}, + {"}", typeLiteral}, + }, + }, + { + Format: "{ A {remote} A }", + Expected: replacer{ + {"{ A ", typeLiteral}, + {"{remote}", typeLabel}, + {" A }", typeLiteral}, + }, + }, + { + Format: "{remote}}", + Expected: replacer{ + {"{remote}", typeLabel}, + {"}", typeLiteral}, + }, + }, + { + Format: "{{remote}", + Expected: replacer{ + {"{", typeLiteral}, + {"{remote}", typeLabel}, + }, + }, + { + Format: `Foo } {remote}`, + Expected: replacer{ + // we don't do any optimizations to join adjacent literals + {"Foo }", typeLiteral}, + {" ", typeLiteral}, + {"{remote}", typeLabel}, + }, + }, + { + Format: `{ Foo`, + Expected: replacer{ + {"{ Foo", typeLiteral}, + }, + }, + { + Format: `} Foo`, + Expected: replacer{ + {"}", typeLiteral}, + {" Foo", typeLiteral}, + }, + }, + { + Format: "A { {remote} {type} {/meta1} } B", + Expected: replacer{ + {"A { ", typeLiteral}, + {"{remote}", typeLabel}, + {" ", typeLiteral}, + {"{type}", typeLabel}, + {" ", typeLiteral}, + {"meta1", typeMetadata}, + {" }", typeLiteral}, + {" B", typeLiteral}, + }, + }, + { + Format: `LOG {remote}:{port} - {>id} "{type} {class} {name} {proto} ` + + `{size} {>do} {>bufsize}" {rcode} {>rflags} {rsize} {/meta1}-{/meta2} ` + + `{duration} END OF LINE`, + Expected: replacer{ + {"LOG ", typeLiteral}, + {"{remote}", typeLabel}, + {":", typeLiteral}, + {"{port}", typeLabel}, + {" - ", typeLiteral}, + {"{>id}", typeLabel}, + {` "`, typeLiteral}, + {"{type}", typeLabel}, + {" ", typeLiteral}, + {"{class}", typeLabel}, + {" ", typeLiteral}, + {"{name}", typeLabel}, + {" ", typeLiteral}, + {"{proto}", typeLabel}, + {" ", typeLiteral}, + {"{size}", typeLabel}, + {" ", typeLiteral}, + {"{>do}", typeLabel}, + {" ", typeLiteral}, + {"{>bufsize}", typeLabel}, + {`" `, typeLiteral}, + {"{rcode}", typeLabel}, + {" ", typeLiteral}, + {"{>rflags}", typeLabel}, + {" ", typeLiteral}, + {"{rsize}", typeLabel}, + {" ", typeLiteral}, + {"meta1", typeMetadata}, + {"-", typeLiteral}, + {"meta2", typeMetadata}, + {" ", typeLiteral}, + {"{duration}", typeLabel}, + {" END OF LINE", typeLiteral}, + }, + }, + } + for i, x := range tests { + r := parseFormat(x.Format) + if 
!reflect.DeepEqual(r, x.Expected) { + t.Errorf("%d: Expected:\n\t%+v\nGot:\n\t%+v", i, x.Expected, r) + } + } +} + +func TestParseFormatNodes(t *testing.T) { + // If we parse the format successfully the result of joining all the + // segments should match the original format. + formats := []string{ + "", + "msg", + "{remote}", + "{remote}", + "{{remote}", + "{{remote}}", + "{{remote}} A", + CommonLogFormat, + CommonLogFormat + " FOO} {BAR}", + "A " + CommonLogFormat + " FOO} {BAR}", + "A " + CommonLogFormat + " {/meta}", + } + join := func(r replacer) string { + a := make([]string, len(r)) + for i, n := range r { + if n.typ == typeMetadata { + a[i] = "{/" + n.value + "}" + } else { + a[i] = n.value + } + } + return strings.Join(a, "") + } + for _, format := range formats { + r := parseFormat(format) + s := join(r) + if s != format { + t.Errorf("Expected format to be: '%s' got: '%s'", format, s) + } + } +} + func TestLabels(t *testing.T) { w := dnstest.NewRecorder(&test.ResponseWriter{}) r := new(dns.Msg) @@ -68,7 +245,7 @@ func TestLabels(t *testing.T) { t.Fatalf("Expect %d labels, got %d", len(expect), len(labels)) } - for _, lbl := range labels { + for lbl := range labels { repl := replacer.Replace(ctx, state, w, lbl) if lbl == "{duration}" { if repl[len(repl)-1] != 's' { @@ -98,6 +275,34 @@ func BenchmarkReplacer(b *testing.B) { } } +func BenchmarkReplacer_CommonLogFormat(b *testing.B) { + + w := dnstest.NewRecorder(&test.ResponseWriter{}) + r := new(dns.Msg) + r.SetQuestion("example.org.", dns.TypeHINFO) + r.Id = 1053 + r.AuthenticatedData = true + r.CheckingDisabled = true + r.MsgHdr.AuthenticatedData = true + w.WriteMsg(r) + state := request.Request{W: w, Req: r} + + replacer := New() + ctxt := context.TODO() + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + replacer.Replace(ctxt, state, w, CommonLogFormat) + } +} + +func BenchmarkParseFormat(b *testing.B) { + for i := 0; i < b.N; i++ { + parseFormat(CommonLogFormat) + } +} + type testProvider map[string]metadata.Func func (tp testProvider) Metadata(ctx context.Context, state request.Request) context.Context { diff --git a/plugin/pkg/response/typify.go b/plugin/pkg/response/typify.go index f4dd91fa4c3..df314d41a44 100644 --- a/plugin/pkg/response/typify.go +++ b/plugin/pkg/response/typify.go @@ -15,7 +15,7 @@ const ( NoError Type = iota // NameError is a NXDOMAIN in header, SOA in auth. NameError - // ServerError is a set of errors we want to cache, for now it containers SERVFAIL and NOTIMPL. + // ServerError is a set of errors we want to cache, for now it contains SERVFAIL and NOTIMPL. ServerError // NoData indicates name found, but not the type: NOERROR in header, SOA in auth. 
NoData diff --git a/plugin/pkg/response/typify_test.go b/plugin/pkg/response/typify_test.go index f22b806fbd6..fca6ba10068 100644 --- a/plugin/pkg/response/typify_test.go +++ b/plugin/pkg/response/typify_test.go @@ -49,7 +49,7 @@ func TestTypifyRRSIG(t *testing.T) { } func TestTypifyImpossible(t *testing.T) { - // create impossible message that denies it's own existence + // create impossible message that denies its own existence m := new(dns.Msg) m.SetQuestion("bar.www.example.org.", dns.TypeAAAA) m.Rcode = dns.RcodeNameError // name does not exist diff --git a/plugin/pkg/reuseport/listen_go111.go b/plugin/pkg/reuseport/listen_go111.go new file mode 100644 index 00000000000..fa6f365d6e7 --- /dev/null +++ b/plugin/pkg/reuseport/listen_go111.go @@ -0,0 +1,37 @@ +// +build go1.11 +// +build aix darwin dragonfly freebsd linux netbsd openbsd + +package reuseport + +import ( + "context" + "net" + "syscall" + + "github.com/coredns/coredns/plugin/pkg/log" + + "golang.org/x/sys/unix" +) + +func control(network, address string, c syscall.RawConn) error { + c.Control(func(fd uintptr) { + if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil { + log.Warningf("Failed to set SO_REUSEPORT on socket: %s", err) + } + }) + return nil +} + +// Listen announces on the local network address. See net.Listen for more information. +// If SO_REUSEPORT is available it will be set on the socket. +func Listen(network, addr string) (net.Listener, error) { + lc := net.ListenConfig{Control: control} + return lc.Listen(context.Background(), network, addr) +} + +// ListenPacket announces on the local network address. See net.ListenPacket for more information. +// If SO_REUSEPORT is available it will be set on the socket. +func ListenPacket(network, addr string) (net.PacketConn, error) { + lc := net.ListenConfig{Control: control} + return lc.ListenPacket(context.Background(), network, addr) +} diff --git a/plugin/pkg/reuseport/listen_go_not111.go b/plugin/pkg/reuseport/listen_go_not111.go new file mode 100644 index 00000000000..e3bdfb90690 --- /dev/null +++ b/plugin/pkg/reuseport/listen_go_not111.go @@ -0,0 +1,13 @@ +// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd + +package reuseport + +import "net" + +// Listen is a wrapper around net.Listen. +func Listen(network, addr string) (net.Listener, error) { return net.Listen(network, addr) } + +// ListenPacket is a wrapper around net.ListenPacket. +func ListenPacket(network, addr string) (net.PacketConn, error) { + return net.ListenPacket(network, addr) +} diff --git a/plugin/plugin.go b/plugin/plugin.go index 7995cbba8a8..ee1bf84296b 100644 --- a/plugin/plugin.go +++ b/plugin/plugin.go @@ -84,7 +84,7 @@ func NextOrFailure(name string, next Handler, ctx context.Context, w dns.Respons } // ClientWrite returns true if the response has been written to the client. -// Each plugin to adhire to this protocol. +// Each plugin to adhere to this protocol. 
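// A downstream caller might use it like this (a sketch, not code from this change):
//
//	rcode, err := plugin.NextOrFailure(p.Name(), p.Next, ctx, w, r)
//	if err == nil && !plugin.ClientWrite(rcode) {
//		// nothing was written to the client yet; this plugin still owns the reply
//	}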
func ClientWrite(rcode int) bool { switch rcode { case dns.RcodeServerFailure: diff --git a/plugin/pprof/OWNERS b/plugin/pprof/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/pprof/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/pprof/pprof.go b/plugin/pprof/pprof.go index 8367a307175..822e6e222f3 100644 --- a/plugin/pprof/pprof.go +++ b/plugin/pprof/pprof.go @@ -1,4 +1,4 @@ -// Package pprof implement a debug endpoint for getting profiles using the +// Package pprof implements a debug endpoint for getting profiles using the // go pprof tooling. package pprof @@ -7,6 +7,8 @@ import ( "net/http" pp "net/http/pprof" "runtime" + + "github.com/coredns/coredns/plugin/pkg/reuseport" ) type handler struct { @@ -17,7 +19,10 @@ type handler struct { } func (h *handler) Startup() error { - ln, err := net.Listen("tcp", h.addr) + // Reloading the plugin without changing the listening address results + // in an error unless we reuse the port because Startup is called for + // new handlers before Shutdown is called for the old ones. + ln, err := reuseport.Listen("tcp", h.addr) if err != nil { log.Errorf("Failed to start pprof handler: %s", err) return err @@ -26,6 +31,9 @@ func (h *handler) Startup() error { h.ln = ln h.mux = http.NewServeMux() + h.mux.HandleFunc(path, func(rw http.ResponseWriter, req *http.Request) { + http.Redirect(rw, req, path+"/", http.StatusFound) + }) h.mux.HandleFunc(path+"/", pp.Index) h.mux.HandleFunc(path+"/cmdline", pp.Cmdline) h.mux.HandleFunc(path+"/profile", pp.Profile) diff --git a/plugin/pprof/setup.go b/plugin/pprof/setup.go index 175a160113d..a9b93673cef 100644 --- a/plugin/pprof/setup.go +++ b/plugin/pprof/setup.go @@ -3,7 +3,6 @@ package pprof import ( "net" "strconv" - "sync" "github.com/coredns/coredns/plugin" clog "github.com/coredns/coredns/plugin/pkg/log" @@ -15,12 +14,7 @@ var log = clog.NewWithPlugin("pprof") const defaultAddr = "localhost:6053" -func init() { - caddy.RegisterPlugin("pprof", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("pprof", setup) } func setup(c *caddy.Controller) error { h := &handler{addr: defaultAddr} @@ -67,12 +61,7 @@ func setup(c *caddy.Controller) error { } - pprofOnce.Do(func() { - c.OnStartup(h.Startup) - c.OnShutdown(h.Shutdown) - }) - + c.OnStartup(h.Startup) + c.OnShutdown(h.Shutdown) return nil } - -var pprofOnce sync.Once diff --git a/plugin/ready/README.md b/plugin/ready/README.md index d4a9bc5febf..d2e430de7c0 100644 --- a/plugin/ready/README.md +++ b/plugin/ready/README.md @@ -13,7 +13,7 @@ will not be queried again. Each Server Block that enables the *ready* plugin will have the plugins *in that server block* report readiness into the /ready endpoint that runs on the same port. This also means that the -*same* plugin with different configurations (in potentialy *different* Server Blocks) will have +*same* plugin with different configurations (in potentially *different* Server Blocks) will have their readiness reported as the union of their respective readinesses. ## Syntax diff --git a/plugin/ready/list.go b/plugin/ready/list.go index c283748eca8..e7d2584d878 100644 --- a/plugin/ready/list.go +++ b/plugin/ready/list.go @@ -6,7 +6,7 @@ import ( "sync" ) -// list is structure that holds the plugins that signals readiness for this server block. +// list is a structure that holds the plugins that signals readiness for this server block. 
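// A plugin opts in by implementing the Readiness interface, for example
// (the helper shown is hypothetical):
//
//	func (e *erratic) Ready() bool { return e.seenEnough() } // hypothetical helper
//
// Once Ready() has returned true the plugin is not queried again.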
type list struct { sync.RWMutex rs []Readiness diff --git a/plugin/ready/ready.go b/plugin/ready/ready.go index ff19b59f877..326d399551d 100644 --- a/plugin/ready/ready.go +++ b/plugin/ready/ready.go @@ -12,6 +12,7 @@ import ( clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/coredns/coredns/plugin/pkg/uniq" + "github.com/coredns/coredns/plugin/pkg/reuseport" ) var ( @@ -30,7 +31,7 @@ type ready struct { } func (rd *ready) onStartup() error { - ln, err := net.Listen("tcp", rd.Addr) + ln, err := reuseport.Listen("tcp", rd.Addr) if err != nil { return err } @@ -45,7 +46,7 @@ func (rd *ready) onStartup() error { ok, todo := plugins.Ready() if ok { w.WriteHeader(http.StatusOK) - io.WriteString(w, "OK") + io.WriteString(w, http.StatusText(http.StatusOK)) return } log.Infof("Still waiting on: %q", todo) diff --git a/plugin/ready/ready_test.go b/plugin/ready/ready_test.go index 7587bad9bb3..fff19cc9135 100644 --- a/plugin/ready/ready_test.go +++ b/plugin/ready/ready_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "net/http" - "sync" "testing" "github.com/coredns/coredns/plugin/erratic" @@ -21,33 +20,22 @@ func TestReady(t *testing.T) { e := &erratic.Erratic{} plugins.Append(e, "erratic") - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - if err := rd.onStartup(); err != nil { - t.Fatalf("Unable to startup the readiness server: %v", err) - } - wg.Done() - }() - wg.Wait() + if err := rd.onStartup(); err != nil { + t.Fatalf("Unable to startup the readiness server: %v", err) + } defer rd.onFinalShutdown() address := fmt.Sprintf("http://%s/ready", rd.ln.Addr().String()) - wg.Add(1) - go func() { - response, err := http.Get(address) - if err != nil { - t.Fatalf("Unable to query %s: %v", address, err) - } - if response.StatusCode != 503 { - t.Errorf("Invalid status code: expecting %d, got %d", 503, response.StatusCode) - } - response.Body.Close() - wg.Done() - }() - wg.Wait() + response, err := http.Get(address) + if err != nil { + t.Fatalf("Unable to query %s: %v", address, err) + } + if response.StatusCode != 503 { + t.Errorf("Invalid status code: expecting %d, got %d", 503, response.StatusCode) + } + response.Body.Close() // make it ready by giving erratic 3 queries. m := new(dns.Msg) @@ -56,7 +44,7 @@ func TestReady(t *testing.T) { e.ServeDNS(context.TODO(), &test.ResponseWriter{}, m) e.ServeDNS(context.TODO(), &test.ResponseWriter{}, m) - response, err := http.Get(address) + response, err = http.Get(address) if err != nil { t.Fatalf("Unable to query %s: %v", address, err) } diff --git a/plugin/ready/setup.go b/plugin/ready/setup.go index eb1e1a8a463..3c0e2ce4379 100644 --- a/plugin/ready/setup.go +++ b/plugin/ready/setup.go @@ -9,12 +9,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("ready", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("ready", setup) } func setup(c *caddy.Controller) error { addr, err := parse(c) diff --git a/plugin/register.go b/plugin/register.go new file mode 100644 index 00000000000..2d74b262235 --- /dev/null +++ b/plugin/register.go @@ -0,0 +1,11 @@ +package plugin + +import "github.com/caddyserver/caddy" + +// Register registers your plugin with CoreDNS and allows it to be called when the server is running. 
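+// A plugin's setup file then shrinks to a one-liner, as the conversions
+// elsewhere in this change show:
+//
+//	func init() { plugin.Register("pprof", setup) }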
+func Register(name string, action caddy.SetupFunc) { + caddy.RegisterPlugin(name, caddy.Plugin{ + ServerType: "dns", + Action: action, + }) +} diff --git a/plugin/reload/OWNERS b/plugin/reload/OWNERS deleted file mode 100644 index f7f9ca271ae..00000000000 --- a/plugin/reload/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - johnbelamaric -approvers: - - johnbelamaric diff --git a/plugin/reload/README.md b/plugin/reload/README.md index c1f5d206b2f..8e58d1e7367 100644 --- a/plugin/reload/README.md +++ b/plugin/reload/README.md @@ -38,11 +38,12 @@ This plugin can only be used once per Server Block. reload [INTERVAL] [JITTER] ~~~ -* The plugin will check for changes every **INTERVAL**, subject to +/- the **JITTER** duration -* **INTERVAL** and **JITTER** are Golang (durations)[https://golang.org/pkg/time/#ParseDuration] -* Default **INTERVAL** is 30s, default **JITTER** is 15s -* Minimal value for **INTERVAL** is 2s, and for **JITTER** is 1s -* If **JITTER** is more than half of **INTERVAL**, it will be set to half of **INTERVAL** +The plugin will check for changes every **INTERVAL**, subject to +/- the **JITTER** duration. + +* **INTERVAL** and **JITTER** are Golang [durations](https://golang.org/pkg/time/#ParseDuration). + The default **INTERVAL** is 30s, default **JITTER** is 15s, the minimal value for **INTERVAL** + is 2s, and for **JITTER** it is 1s. If **JITTER** is more than half of **INTERVAL**, it will be + set to half of **INTERVAL** ## Examples @@ -85,16 +86,17 @@ is already listening on that port. The process reloads and performs the followin 4. fail loading the new Corefile, abort and keep using the old process After the aborted attempt to reload we are left with the old processes running, but the listener is -closed in step 1; so the health endpoint is broken. The same can hopen in the prometheus metrics plugin. +closed in step 1; so the health endpoint is broken. The same can happen in the prometheus metrics plugin. In general be careful with assigning new port and expecting reload to work fully. -Also any `import` statement is not discovered by this plugin. This means if any of these imported files -changes the *reload* plugin is ignorant of that fact. +In CoreDNS v1.6.0 and earlier any `import` statements are not discovered by this plugin. +This means if any of these imported files changes the *reload* plugin is ignorant of that fact. +CoreDNS v1.7.0 and later does parse the Corefile and supports detecting changes in imported files. ## Metrics - If monitoring is enabled (via the *prometheus* directive) then the following metric is exported: + If monitoring is enabled (via the *prometheus* plugin) then the following metric is exported: * `coredns_reload_failed_count_total{}` - counts the number of failed reload attempts. diff --git a/plugin/reload/reload.go b/plugin/reload/reload.go index f6e676d880c..817582c8c3b 100644 --- a/plugin/reload/reload.go +++ b/plugin/reload/reload.go @@ -1,14 +1,17 @@ +// Package reload periodically checks if the Corefile has changed, and reloads if so. 
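+// Change detection below works on the *parsed* configuration, roughly:
+//
+//	blocks, _ := caddyfile.Parse(corefile.Path(), bytes.NewReader(corefile.Body()), nil)
+//	buf, _ := json.Marshal(blocks)
+//	sum := md5.Sum(buf)
+//
+// so import-ed files are expanded before hashing and edits to them are
+// noticed, while formatting-only changes to the Corefile generally are not.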
package reload import ( + "bytes" "crypto/md5" + "encoding/json" "sync" "time" "github.com/caddyserver/caddy" + "github.com/caddyserver/caddy/caddyfile" ) -// reload periodically checks if the Corefile has changed, and reloads if so const ( unused = 0 maybeUsed = 1 @@ -46,6 +49,14 @@ func (r *reload) interval() time.Duration { return r.dur } +func parse(corefile caddy.Input) ([]byte, error) { + serverBlocks, err := caddyfile.Parse(corefile.Path(), bytes.NewReader(corefile.Body()), nil) + if err != nil { + return nil, err + } + return json.Marshal(serverBlocks) +} + func hook(event caddy.EventName, info interface{}) error { if event != caddy.InstanceStartupEvent { return nil @@ -60,7 +71,12 @@ func hook(event caddy.EventName, info interface{}) error { // this should be an instance. ok to panic if not instance := info.(*caddy.Instance) - md5sum := md5.Sum(instance.Caddyfile().Body()) + parsedCorefile, err := parse(instance.Caddyfile()) + if err != nil { + return err + } + + md5sum := md5.Sum(parsedCorefile) log.Infof("Running configuration MD5 = %x\n", md5sum) go func() { @@ -73,12 +89,17 @@ func hook(event caddy.EventName, info interface{}) error { if err != nil { continue } - s := md5.Sum(corefile.Body()) + parsedCorefile, err := parse(corefile) + if err != nil { + log.Warningf("Corefile parse failed: %s", err) + continue + } + s := md5.Sum(parsedCorefile) if s != md5sum { // Let not try to restart with the same file, even though it is wrong. md5sum = s // now lets consider that plugin will not be reload, unless appear in next config file - // change status iof usage will be reset in setup if the plugin appears in config file + // change status of usage will be reset in setup if the plugin appears in config file r.setUsage(maybeUsed) _, err := instance.Restart(corefile) if err != nil { diff --git a/plugin/reload/setup.go b/plugin/reload/setup.go index b52afe56079..c0dcc14e9fa 100644 --- a/plugin/reload/setup.go +++ b/plugin/reload/setup.go @@ -14,12 +14,7 @@ import ( var log = clog.NewWithPlugin("reload") -func init() { - caddy.RegisterPlugin("reload", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("reload", setup) } // the info reload is global to all application, whatever number of reloads. // it is used to transmit data between Setup and start of the hook called 'onInstanceStartup' diff --git a/plugin/rewrite/OWNERS b/plugin/rewrite/OWNERS deleted file mode 100644 index b77031d8616..00000000000 --- a/plugin/rewrite/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - greenpau - - johnbelamaric -approvers: - - greenpau - - johnbelamaric diff --git a/plugin/rewrite/condition.go b/plugin/rewrite/condition.go deleted file mode 100644 index 0d9e4b18ecb..00000000000 --- a/plugin/rewrite/condition.go +++ /dev/null @@ -1,111 +0,0 @@ -package rewrite - -import ( - "context" - "fmt" - "regexp" - "strings" - - "github.com/coredns/coredns/plugin/pkg/replacer" - "github.com/coredns/coredns/request" - - "github.com/miekg/dns" -) - -// Operators that are defined. -const ( - Is = "is" - Not = "not" - Has = "has" - NotHas = "not_has" - StartsWith = "starts_with" - EndsWith = "ends_with" - Match = "match" - NotMatch = "not_match" -) - -var repl = replacer.New() - -// condition is a rewrite condition. 
-type condition func(string, string) bool - -var conditions = map[string]condition{ - Is: isFunc, - Not: notFunc, - Has: hasFunc, - NotHas: notHasFunc, - StartsWith: startsWithFunc, - EndsWith: endsWithFunc, - Match: matchFunc, - NotMatch: notMatchFunc, -} - -// isFunc is condition for Is operator. It checks for equality. -func isFunc(a, b string) bool { return a == b } - -// notFunc is condition for Not operator. It checks for inequality. -func notFunc(a, b string) bool { return a != b } - -// hasFunc is condition for Has operator. It checks if b is a substring of a. -func hasFunc(a, b string) bool { return strings.Contains(a, b) } - -// notHasFunc is condition for NotHas operator. It checks if b is not a substring of a. -func notHasFunc(a, b string) bool { return !strings.Contains(a, b) } - -// startsWithFunc is condition for StartsWith operator. It checks if b is a prefix of a. -func startsWithFunc(a, b string) bool { return strings.HasPrefix(a, b) } - -// endsWithFunc is condition for EndsWith operator. It checks if b is a suffix of a. -func endsWithFunc(a, b string) bool { - // TODO(miek): IsSubDomain - return strings.HasSuffix(a, b) -} - -// matchFunc is condition for Match operator. It does regexp matching of a against pattern in b -// and returns if they match. -func matchFunc(a, b string) bool { - matched, _ := regexp.MatchString(b, a) - return matched -} - -// notMatchFunc is condition for NotMatch operator. It does regexp matching of a against pattern in b -// and returns if they do not match. -func notMatchFunc(a, b string) bool { - matched, _ := regexp.MatchString(b, a) - return !matched -} - -// If is statement for a rewrite condition. -type If struct { - A string - Operator string - B string -} - -// True returns true if the condition is true and false otherwise. -// If r is not nil, it replaces placeholders before comparison. -func (i If) True(r *dns.Msg) bool { - if c, ok := conditions[i.Operator]; ok { - a, b := i.A, i.B - if r != nil { - ctx := context.TODO() - state := request.Request{Req: r, W: nil} // hmm W nil? - a = repl.Replace(ctx, state, nil, i.A) - b = repl.Replace(ctx, state, nil, i.B) - } - return c(a, b) - } - return false -} - -// NewIf creates a new If condition. -func NewIf(a, operator, b string) (If, error) { - if _, ok := conditions[operator]; !ok { - return If{}, fmt.Errorf("invalid operator %v", operator) - } - return If{ - A: a, - Operator: operator, - B: b, - }, nil -} diff --git a/plugin/rewrite/edns0.go b/plugin/rewrite/edns0.go index 34aaf3d6772..2872d089ac8 100644 --- a/plugin/rewrite/edns0.go +++ b/plugin/rewrite/edns0.go @@ -1,4 +1,4 @@ -// Package rewrite is plugin for rewriting requests internally to something different. +// Package rewrite is a plugin for rewriting requests internally to something different. 
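+// A typical rewrite stanza this package handles looks like (one of several
+// rule forms):
+//
+//	rewrite name exact example.org example.net
+//
+// which replaces matching question names before later plugins see the query.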
package rewrite import ( diff --git a/plugin/rewrite/fuzz.go b/plugin/rewrite/fuzz.go index 043a4a5c9a7..757ffab45c6 100644 --- a/plugin/rewrite/fuzz.go +++ b/plugin/rewrite/fuzz.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package rewrite diff --git a/plugin/rewrite/name.go b/plugin/rewrite/name.go index 86a5f1ecd91..7a8f9ad7ca3 100644 --- a/plugin/rewrite/name.go +++ b/plugin/rewrite/name.go @@ -9,8 +9,6 @@ import ( "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/request" - - "github.com/miekg/dns" ) type exactNameRule struct { @@ -106,9 +104,7 @@ func (rule *regexNameRule) Rewrite(ctx context.Context, state request.Request) R s := rule.Replacement for groupIndex, groupValue := range regexGroups { groupIndexStr := "{" + strconv.Itoa(groupIndex) + "}" - if strings.Contains(s, groupIndexStr) { - s = strings.Replace(s, groupIndexStr, groupValue, -1) - } + s = strings.Replace(s, groupIndexStr, groupValue, -1) } state.Req.Question[0].Name = s return RewriteDone @@ -199,7 +195,7 @@ func newNameRule(nextAction string, args ...string) (Rule, error) { }, }, nil default: - return nil, fmt.Errorf("A name rule supports only exact, prefix, suffix, substring, and regex name matching, received: %s", matchType) + return nil, fmt.Errorf("name rule supports only exact, prefix, suffix, substring, and regex name matching, received: %s", matchType) } } if len(args) == 7 { @@ -264,25 +260,9 @@ func (rule *substringNameRule) GetResponseRule() ResponseRule { return ResponseR // GetResponseRule return a rule to rewrite the response with. func (rule *regexNameRule) GetResponseRule() ResponseRule { return rule.ResponseRule } -// validName returns true if s is valid domain name and shorter than 256 characters. -func validName(s string) bool { - _, ok := dns.IsDomainName(s) - if !ok { - return false - } - if len(dns.Name(s).String()) > 255 { - return false - } - - return true -} - // hasClosingDot return true if s has a closing dot at the end. func hasClosingDot(s string) bool { - if strings.HasSuffix(s, ".") { - return true - } - return false + return strings.HasSuffix(s, ".") } // getSubExprUsage return the number of subexpressions used in s. diff --git a/plugin/rewrite/reverter.go b/plugin/rewrite/reverter.go index 5a55c7ad5e4..00e41454e4a 100644 --- a/plugin/rewrite/reverter.go +++ b/plugin/rewrite/reverter.go @@ -59,9 +59,7 @@ func (r *ResponseReverter) WriteMsg(res *dns.Msg) error { s := rule.Replacement for groupIndex, groupValue := range regexGroups { groupIndexStr := "{" + strconv.Itoa(groupIndex) + "}" - if strings.Contains(s, groupIndexStr) { - s = strings.Replace(s, groupIndexStr, groupValue, -1) - } + s = strings.Replace(s, groupIndexStr, groupValue, -1) } name = s isNameRewritten = true diff --git a/plugin/rewrite/rewrite.go b/plugin/rewrite/rewrite.go index 90652332658..13e1d2092d5 100644 --- a/plugin/rewrite/rewrite.go +++ b/plugin/rewrite/rewrite.go @@ -29,7 +29,7 @@ const ( Continue = "continue" ) -// Rewrite is plugin to rewrite requests internally before being handled. +// Rewrite is a plugin to rewrite requests internally before being handled. 
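+// For regex rules the replacement template may reference capture groups,
+// expanded by plain substitution (see the loop in name.go above); schematically:
+//
+//	groups := re.FindStringSubmatch(qname) // groups[0] is the full match
+//	s := rule.Replacement                  // e.g. "{1}.example.net."
+//	for i, g := range groups {
+//		s = strings.Replace(s, "{"+strconv.Itoa(i)+"}", g, -1)
+//	}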
type Rewrite struct { Next plugin.Handler Rules []Rule @@ -44,11 +44,10 @@ func (rw Rewrite) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg for _, rule := range rw.Rules { switch result := rule.Rewrite(ctx, state); result { case RewriteDone: - if !validName(state.Req.Question[0].Name) { - x := state.Req.Question[0].Name - log.Errorf("Invalid name after rewrite: %s", x) + if _, ok := dns.IsDomainName(state.Req.Question[0].Name); !ok { + err := fmt.Errorf("invalid name after rewrite: %s", state.Req.Question[0].Name) state.Req.Question[0] = wr.originalQuestion - return dns.RcodeServerFailure, fmt.Errorf("invalid name after rewrite: %s", x) + return dns.RcodeServerFailure, err } respRule := rule.GetResponseRule() if respRule.Active { @@ -62,7 +61,6 @@ func (rw Rewrite) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg return plugin.NextOrFailure(rw.Name(), rw.Next, ctx, wr, r) } case RewriteIgnored: - break } } if rw.noRevert || len(wr.ResponseRules) == 0 { diff --git a/plugin/rewrite/rewrite_test.go b/plugin/rewrite/rewrite_test.go index 31692195438..ecab0778708 100644 --- a/plugin/rewrite/rewrite_test.go +++ b/plugin/rewrite/rewrite_test.go @@ -422,7 +422,7 @@ func optsEqual(a, b []dns.EDNS0) bool { if aa.SourceScope != bb.SourceScope { return false } - if !bytes.Equal(aa.Address, bb.Address) { + if !aa.Address.Equal(bb.Address) { return false } } else { diff --git a/plugin/rewrite/setup.go b/plugin/rewrite/setup.go index 00c0a2f2c99..8c2890c6298 100644 --- a/plugin/rewrite/setup.go +++ b/plugin/rewrite/setup.go @@ -3,19 +3,11 @@ package rewrite import ( "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" - clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/caddyserver/caddy" ) -var log = clog.NewWithPlugin("rewrite") - -func init() { - caddy.RegisterPlugin("rewrite", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("rewrite", setup) } func setup(c *caddy.Controller) error { rewrites, err := rewriteParse(c) diff --git a/plugin/rewrite/ttl.go b/plugin/rewrite/ttl.go index 73445dfc919..59ed9f52a1e 100644 --- a/plugin/rewrite/ttl.go +++ b/plugin/rewrite/ttl.go @@ -159,7 +159,7 @@ func newTTLRule(nextAction string, args ...string) (Rule, error) { }, }, nil default: - return nil, fmt.Errorf("A ttl rule supports only exact, prefix, suffix, substring, and regex name matching") + return nil, fmt.Errorf("ttl rule supports only exact, prefix, suffix, substring, and regex name matching") } } if len(args) > 3 { diff --git a/plugin/root/OWNERS b/plugin/root/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/root/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/root/root.go b/plugin/root/root.go index 15cc45626c8..66177a6bd5e 100644 --- a/plugin/root/root.go +++ b/plugin/root/root.go @@ -12,12 +12,7 @@ import ( var log = clog.NewWithPlugin("root") -func init() { - caddy.RegisterPlugin("root", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("root", setup) } func setup(c *caddy.Controller) error { config := dnsserver.GetConfig(c) diff --git a/plugin/route53/OWNERS b/plugin/route53/OWNERS deleted file mode 100644 index aba4fe3da80..00000000000 --- a/plugin/route53/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - yongtang - - dilyevsky -approvers: - - yongtang - - dilyevsky diff --git a/plugin/route53/README.md b/plugin/route53/README.md index 
d3a1b630c12..c0e7dd2dfd2 100644 --- a/plugin/route53/README.md +++ b/plugin/route53/README.md @@ -15,9 +15,10 @@ The route53 plugin can be used when coredns is deployed on AWS or elsewhere. ~~~ txt route53 [ZONE:HOSTED_ZONE_ID...] { - [aws_access_key AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY] + aws_access_key [AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY] credentials PROFILE [FILENAME] fallthrough [ZONES...] + refresh DURATION } ~~~ @@ -41,19 +42,31 @@ route53 [ZONE:HOSTED_ZONE_ID...] { * **FILENAME** AWS credentials filename. Defaults to `~/.aws/credentials`. * `fallthrough` If zone matches and no record can be generated, pass request to the next plugin. - If **[ZONES...]** is omitted, then fallthrough happens for all zones for which the plugin is + If **ZONES** is omitted, then fallthrough happens for all zones for which the plugin is authoritative. If specific zones are listed (for example `in-addr.arpa` and `ip6.arpa`), then only queries for those zones will be subject to fallthrough. -* **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block +* **ZONES** zones it should be authoritative for. If empty, the zones from the configuration + block. + +* `refresh` can be used to control how long to wait between record retrievals from Route 53. It requires + a duration string as a parameter to specify the time between update cycles. Each update + cycle may result in many AWS API calls depending on how many domains use this plugin and how + many records are in each. Adjusting the update frequency may help reduce the potential for API + rate limiting imposed by AWS. + +* **DURATION** A duration string. Defaults to `1m`. If units are unspecified, seconds are assumed. ## Examples -Enable route53 with implicit AWS credentials and and resolve CNAMEs via 10.0.0.1: +Enable route53 with implicit AWS credentials and resolve CNAMEs via 10.0.0.1: ~~~ txt -. { +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 +} + +. { forward . 10.0.0.1 } ~~~ @@ -61,7 +74,7 @@ Enable route53 with implicit AWS credentials and and resolve CNAMEs via 10.0.0.1 Enable route53 with explicit AWS credentials: ~~~ txt -. { +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 { aws_access_key AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY } @@ -81,7 +94,16 @@ Enable route53 with fallthrough: Enable route53 with multiple hosted zones with the same domain: ~~~ txt -. { +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 example.org.:Z93A52145678156 } ~~~ + +Enable route53 and refresh records every 3 minutes: ~~~ txt +example.org { + route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 { + refresh 3m + } +} +~~~ diff --git a/plugin/route53/route53.go b/plugin/route53/route53.go index 02d0b8230c4..c86d5f0f587 100644 --- a/plugin/route53/route53.go +++ b/plugin/route53/route53.go @@ -31,6 +31,7 @@ type Route53 struct { zoneNames []string client route53iface.Route53API upstream *upstream.Upstream + refresh time.Duration zMu sync.RWMutex zones zones @@ -45,11 +46,11 @@ type zone struct { type zones map[string][]*zone // New reads from the keys map which uses domain names as its key and hosted -// zone id lists as its values, validates that each domain name/zone id pair does -// exist, and returns a new *Route53. In addition to this, upstream is passed -// for doing recursive queries against CNAMEs. -// Returns error if it cannot verify any given domain name/zone id pair.
-func New(ctx context.Context, c route53iface.Route53API, keys map[string][]string, up *upstream.Upstream) (*Route53, error) { +// zone id lists as its values, validates that each domain name/zone id pair +// does exist, and returns a new *Route53. In addition to this, upstream is used +// for doing recursive queries against CNAMEs. Returns error if it cannot +// verify any given domain name/zone id pair. +func New(ctx context.Context, c route53iface.Route53API, keys map[string][]string, refresh time.Duration) (*Route53, error) { zones := make(map[string][]*zone, len(keys)) zoneNames := make([]string, 0, len(keys)) for dns, hostedZoneIDs := range keys { @@ -71,7 +72,8 @@ func New(ctx context.Context, c route53iface.Route53API, keys map[string][]strin client: c, zoneNames: zoneNames, zones: zones, - upstream: up, + upstream: upstream.New(), + refresh: refresh, }, nil } @@ -87,7 +89,7 @@ func (h *Route53) Run(ctx context.Context) error { case <-ctx.Done(): log.Infof("Breaking out of Route53 update loop: %v", ctx.Err()) return - case <-time.After(1 * time.Minute): + case <-time.After(h.refresh): if err := h.updateZones(ctx); err != nil && ctx.Err() == nil /* Don't log error if ctx expired. */ { log.Errorf("Failed to update zones: %v", err) } @@ -191,7 +193,7 @@ func maybeUnescape(s string) (string, error) { case r >= rune('0') && r <= rune('9'): case r == rune('*'): if out != "" { - return "", errors.New("`*' ony supported as wildcard (leftmost label)") + return "", errors.New("`*' only supported as wildcard (leftmost label)") } case r == rune('-'): case r == rune('.'): @@ -248,6 +250,7 @@ func (h *Route53) updateZones(ctx context.Context) error { newZ.Upstream = h.upstream in := &route53.ListResourceRecordSetsInput{ HostedZoneId: aws.String(hostedZone.id), + MaxItems: aws.String("1000"), } err = h.client.ListResourceRecordSetsPagesWithContext(ctx, in, func(out *route53.ListResourceRecordSetsOutput, last bool) bool { diff --git a/plugin/route53/route53_test.go b/plugin/route53/route53_test.go index 5657e7c7ad9..aa5c82b9c43 100644 --- a/plugin/route53/route53_test.go +++ b/plugin/route53/route53_test.go @@ -5,10 +5,10 @@ import ( "errors" "reflect" "testing" + "time" "github.com/coredns/coredns/plugin/pkg/dnstest" "github.com/coredns/coredns/plugin/pkg/fall" - "github.com/coredns/coredns/plugin/pkg/upstream" "github.com/coredns/coredns/plugin/test" crequest "github.com/coredns/coredns/request" @@ -79,7 +79,7 @@ func (fakeRoute53) ListResourceRecordSetsPagesWithContext(_ aws.Context, in *rou func TestRoute53(t *testing.T) { ctx := context.Background() - r, err := New(ctx, fakeRoute53{}, map[string][]string{"bad.": {"0987654321"}}, &upstream.Upstream{}) + r, err := New(ctx, fakeRoute53{}, map[string][]string{"bad.": {"0987654321"}}, time.Minute) if err != nil { t.Fatalf("Failed to create Route53: %v", err) } @@ -87,7 +87,7 @@ func TestRoute53(t *testing.T) { t.Fatalf("Expected errors for zone bad.") } - r, err = New(ctx, fakeRoute53{}, map[string][]string{"org.": {"1357986420", "1234567890"}, "gov.": {"Z098765432", "1234567890"}}, &upstream.Upstream{}) + r, err = New(ctx, fakeRoute53{}, map[string][]string{"org.": {"1357986420", "1234567890"}, "gov.": {"Z098765432", "1234567890"}}, 90*time.Second) if err != nil { t.Fatalf("Failed to create Route53: %v", err) } @@ -285,7 +285,7 @@ func TestMaybeUnescape(t *testing.T) { // 3. Escaped dot, 'a' and a hyphen. No idea why but we'll allow it.
{escaped: `weird\\055ex\\141mple\\056com\\056\\056`, want: "weird-example.com.."}, // 4. escaped `*` in the middle - NOT OK. - {escaped: `e\\052ample.com`, wantErr: errors.New("`*' ony supported as wildcard (leftmost label)")}, + {escaped: `e\\052ample.com`, wantErr: errors.New("`*' only supported as wildcard (leftmost label)")}, // 5. Invalid character. {escaped: `\\000.example.com`, wantErr: errors.New(`invalid character: \\000`)}, // 6. Invalid escape sequence in the middle. diff --git a/plugin/route53/setup.go b/plugin/route53/setup.go index adc2b3e006f..c285bee2d76 100644 --- a/plugin/route53/setup.go +++ b/plugin/route53/setup.go @@ -2,16 +2,20 @@ package route53 import ( "context" + "fmt" + "strconv" "strings" + "time" "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/pkg/fall" clog "github.com/coredns/coredns/plugin/pkg/log" - "github.com/coredns/coredns/plugin/pkg/upstream" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/route53" "github.com/aws/aws-sdk-go/service/route53/route53iface" @@ -20,49 +24,43 @@ import ( var log = clog.NewWithPlugin("route53") -func init() { - caddy.RegisterPlugin("route53", caddy.Plugin{ - ServerType: "dns", - Action: func(c *caddy.Controller) error { - f := func(credential *credentials.Credentials) route53iface.Route53API { - return route53.New(session.Must(session.NewSession(&aws.Config{ - Credentials: credential, - }))) - } - return setup(c, f) - }, - }) +func init() { plugin.Register("route53", setup) } + +// exposed for testing +var f = func(credential *credentials.Credentials) route53iface.Route53API { + return route53.New(session.Must(session.NewSession(&aws.Config{Credentials: credential}))) } -func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Route53API) error { - keyPairs := map[string]struct{}{} - keys := map[string][]string{} +func setup(c *caddy.Controller) error { + for c.Next() { + keyPairs := map[string]struct{}{} + keys := map[string][]string{} - // Route53 plugin attempts to find AWS credentials by using ChainCredentials. - // And the order of that provider chain is as follows: - // Static AWS keys -> Environment Variables -> Credentials file -> IAM role - // With that said, even though a user doesn't define any credentials in - // Corefile, we should still attempt to read the default credentials file, - // ~/.aws/credentials with the default profile. - sharedProvider := &credentials.SharedCredentialsProvider{} - var providers []credentials.Provider - var fall fall.F + // Route53 plugin attempts to find AWS credentials by using ChainCredentials. + // And the order of that provider chain is as follows: + // Static AWS keys -> Environment Variables -> Credentials file -> IAM role + // With that said, even though a user doesn't define any credentials in + // Corefile, we should still attempt to read the default credentials file, + // ~/.aws/credentials with the default profile. 
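+	//
+	// The chain assembled here ends up equivalent to (in lookup order):
+	//
+	//	credentials.NewChainCredentials([]credentials.Provider{
+	//		/* &credentials.StaticProvider{...}, if aws_access_key is set */
+	//		&credentials.EnvProvider{},
+	//		&credentials.SharedCredentialsProvider{},
+	//		&ec2rolecreds.EC2RoleProvider{ /* via EC2 metadata, appended below */ },
+	//	})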
+ sharedProvider := &credentials.SharedCredentialsProvider{} + var providers []credentials.Provider + var fall fall.F + + refresh := time.Duration(1) * time.Minute // default update frequency to 1 minute - up := upstream.New() - for c.Next() { args := c.RemainingArgs() for i := 0; i < len(args); i++ { parts := strings.SplitN(args[i], ":", 2) if len(parts) != 2 { - return c.Errf("invalid zone '%s'", args[i]) + return plugin.Error("route53", c.Errf("invalid zone '%s'", args[i])) } dns, hostedZoneID := parts[0], parts[1] if dns == "" || hostedZoneID == "" { - return c.Errf("invalid zone '%s'", args[i]) + return plugin.Error("route53", c.Errf("invalid zone '%s'", args[i])) } if _, ok := keyPairs[args[i]]; ok { - return c.Errf("conflict zone '%s'", args[i]) + return plugin.Error("route53", c.Errf("conflict zone '%s'", args[i])) } keyPairs[args[i]] = struct{}{} @@ -74,7 +72,7 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro case "aws_access_key": v := c.RemainingArgs() if len(v) < 2 { - return c.Errf("invalid access key '%v'", v) + return plugin.Error("route53", c.Errf("invalid access key '%v'", v)) } providers = append(providers, &credentials.StaticProvider{ Value: credentials.Value{ @@ -95,27 +93,50 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro } case "fallthrough": fall.SetZonesFromArgs(c.RemainingArgs()) + case "refresh": + if c.NextArg() { + refreshStr := c.Val() + _, err := strconv.Atoi(refreshStr) + if err == nil { + refreshStr = fmt.Sprintf("%ss", c.Val()) + } + refresh, err = time.ParseDuration(refreshStr) + if err != nil { + return plugin.Error("route53", c.Errf("Unable to parse duration: '%v'", err)) + } + if refresh <= 0 { + return plugin.Error("route53", c.Errf("refresh interval must be greater than 0: %s", refreshStr)) + } + } else { + return plugin.Error("route53", c.ArgErr()) + } default: - return c.Errf("unknown property '%s'", c.Val()) + return plugin.Error("route53", c.Errf("unknown property '%s'", c.Val())) } } - } - providers = append(providers, &credentials.EnvProvider{}, sharedProvider) - client := f(credentials.NewChainCredentials(providers)) - ctx := context.Background() - h, err := New(ctx, client, keys, up) - if err != nil { - return c.Errf("failed to create Route53 plugin: %v", err) - } - h.Fall = fall - if err := h.Run(ctx); err != nil { - return c.Errf("failed to initialize Route53 plugin: %v", err) - } - dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { - h.Next = next - return h - }) + session, err := session.NewSession(&aws.Config{}) + if err != nil { + return plugin.Error("route53", err) + } + providers = append(providers, &credentials.EnvProvider{}, sharedProvider, &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session), + }) + client := f(credentials.NewChainCredentials(providers)) + ctx := context.Background() + h, err := New(ctx, client, keys, refresh) + if err != nil { + return plugin.Error("route53", c.Errf("failed to create Route53 plugin: %v", err)) + } + h.Fall = fall + if err := h.Run(ctx); err != nil { + return plugin.Error("route53", c.Errf("failed to initialize Route53 plugin: %v", err)) + } + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + h.Next = next + return h + }) + } return nil } diff --git a/plugin/route53/setup_test.go b/plugin/route53/setup_test.go index 3e827eb58a9..cb73d8fefef 100644 --- a/plugin/route53/setup_test.go +++ b/plugin/route53/setup_test.go @@ -9,7 +9,7 @@ import ( ) func 
TestSetupRoute53(t *testing.T) { - f := func(credential *credentials.Credentials) route53iface.Route53API { + f = func(credential *credentials.Credentials) route53iface.Route53API { return fakeRoute53{} } @@ -51,13 +51,29 @@ func TestSetupRoute53(t *testing.T) { {`route53 example.org:12345678 example.org:12345678 { }`, true}, + {`route53 example.org:12345678 { + refresh 90 +}`, false}, + {`route53 example.org:12345678 { + refresh 5m +}`, false}, + {`route53 example.org:12345678 { + refresh +}`, true}, + {`route53 example.org:12345678 { + refresh foo +}`, true}, + {`route53 example.org:12345678 { + refresh -1m +}`, true}, + {`route53 example.org { }`, true}, } for _, test := range tests { c := caddy.NewTestController("dns", test.body) - if err := setup(c, f); (err == nil) == test.expectedError { + if err := setup(c); (err == nil) == test.expectedError { t.Errorf("Unexpected errors: %v", err) } } diff --git a/plugin/secondary/OWNERS b/plugin/secondary/OWNERS deleted file mode 100644 index 252bba86cd9..00000000000 --- a/plugin/secondary/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - bradbeam - - miekg -approvers: - - bradbeam - - miekg diff --git a/plugin/secondary/setup.go b/plugin/secondary/setup.go index 401b2af7403..410bc097634 100644 --- a/plugin/secondary/setup.go +++ b/plugin/secondary/setup.go @@ -10,12 +10,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("secondary", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("secondary", setup) } func setup(c *caddy.Controller) error { zones, err := secondaryParse(c) diff --git a/plugin/secondary/setup_test.go b/plugin/secondary/setup_test.go index 47c8abac97f..7fc36f67907 100644 --- a/plugin/secondary/setup_test.go +++ b/plugin/secondary/setup_test.go @@ -55,7 +55,7 @@ func TestSecondaryParse(t *testing.T) { } } - // This is only set *iff* we have a zone (i.e. not in all tests above) + // This is only set *if* we have a zone (i.e. not in all tests above) for _, v := range s.Z { if x := v.TransferFrom[0]; x != test.transferFrom { t.Fatalf("Test %d transform from names don't match expected %q, but got %q", i, test.transferFrom, x) diff --git a/plugin/sign/README.md b/plugin/sign/README.md new file mode 100644 index 00000000000..90d687e59f2 --- /dev/null +++ b/plugin/sign/README.md @@ -0,0 +1,168 @@ +# sign + +## Name + +*sign* - adds DNSSEC records to zone files. + +## Description + +The *sign* plugin is used to sign (see RFC 6781) zones. In this process DNSSEC resource records are +added. The signatures that sign the resource record sets have an expiration date; this means the +signing process must be repeated before this expiration date is reached. Otherwise the zone's data +will go BAD (RFC 4035, Section 5.5). The *sign* plugin takes care of this. + +Only NSEC is supported; *sign* does not support NSEC3. + +*Sign* works in conjunction with the *file* and *auto* plugins; this plugin **signs** the zone +files, *auto* and *file* **serve** the zones' *data*. + +For this plugin to work at least one Common Signing Key (see coredns-keygen(1)) is needed. This key +(or keys) will be used to sign the entire zone. *Sign* does not support the ZSK/KSK split, nor will +it do key or algorithm rollovers - it just signs. + +*Sign* will: + + * (Re)-sign the zone with the CSK(s) when: + + - the last time it was signed is more than 6 days ago. Each zone will have some jitter + applied to the inception date. + + - the signature only has 14 days left before expiring.
+ + Both these dates are only checked on the SOA's signature(s). + + * Create RRSIGs that have an inception of -3 hours (minus a jitter between 0 and 18 hours) + and an expiration of +32 days for every given DNSKEY. + + * Add NSEC records for all names in the zone. The TTL for these is the negative cache TTL from the + SOA record. + + * Add or replace *all* apex CDS/CDNSKEY records with the ones derived from the given keys. For + each key two CDS records are created: one with SHA1 and another with SHA256. + + * Update the SOA's serial number to the *Unix epoch* of when the signing happens. This will + overwrite *any* previous serial number. + + +There are two ways that dictate when a zone is signed. Normally every 6 days (plus jitter) it will +be resigned. If for some reason that check fails, the 14-days-before-expiry check kicks in. + +Keys are named (following BIND9): `K<name>+<alg>+<keyid>.key` and `K<name>+<alg>+<keyid>.private`. +The keys **must not** be included in your zone; they will be added by *sign*. These keys can be +generated with `coredns-keygen` or BIND9's `dnssec-keygen`. You don't have to adhere to this naming +scheme, but then you need to name your keys explicitly, see the `keys file` directive. + +A generated zone is written out in a file named `db.<name>.signed` in the directory named by the +`directory` directive (which defaults to `/var/lib/coredns`). + +## Syntax + +~~~ +sign DBFILE [ZONES...] { + key file|directory KEY...|DIR... + directory DIR +} +~~~ + +* **DBFILE** the zone database file to read and parse. If the path is relative, the path from the + *root* plugin will be prepended to it. +* **ZONES** zones it should sign. If empty, the zones from the configuration block are + used. +* `key` specifies the key(s) (there can be multiple) to sign the zone. If `file` is + used the **KEY**'s filenames are used as is. If `directory` is used, *sign* will look in **DIR** + for `K<name>+<alg>+<keyid>` files. Any metadata in these files (Activate, Publish, etc.) is + *ignored*. These keys must also be Key Signing Keys (KSK). +* `directory` specifies the **DIR** where CoreDNS should save zones that have been signed. + If not given this defaults to `/var/lib/coredns`. The zones are saved under the name + `db.<name>.signed`. If the path is relative the path from the *root* plugin will be prepended + to it. + +Keys can be generated with `coredns-keygen`; to create one for use in the *sign* plugin, use: +`coredns-keygen example.org` or `dnssec-keygen -a ECDSAP256SHA256 -f KSK example.org`. + +## Examples + +Sign the `example.org` zone contained in the file `db.example.org` and write the result to +`./db.example.org.signed` to let the *file* plugin pick it up and serve it. The keys used +are read from `/etc/coredns/keys/Kexample.org.key` and `/etc/coredns/keys/Kexample.org.private`. + +~~~ txt +example.org { + file db.example.org.signed + + sign db.example.org { + key file /etc/coredns/keys/Kexample.org + directory . + } +} +~~~ + +Running this leads to the following log output (note the timers in this example have been set to +shorter intervals). + +~~~ txt +[WARNING] plugin/file: Failed to open "open /tmp/db.example.org.signed: no such file or directory": trying again in 1m0s +[INFO] plugin/sign: Signing "example.org." because open /tmp/db.example.org.signed: no such file or directory +[INFO] plugin/sign: Successfully signed zone "example.org." in "/tmp/db.example.org.signed" with key tags "59725" and 1564766865 SOA serial, elapsed 9.357933ms, next: 2019-08-02T22:27:45.270Z +[INFO] plugin/file: Successfully reloaded zone "example.org." in "/tmp/db.example.org.signed" with serial 1564766865 +~~~
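The timers in that output come from the validity window described earlier. As a rough, standalone illustration (constants assumed from the README text above, not taken from the plugin's code):

~~~ go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	now := time.Now().UTC()
	jitter := time.Duration(rand.Int63n(int64(18 * time.Hour)))
	inception := now.Add(-3 * time.Hour).Add(-jitter) // backdated against clock skew
	expiration := now.Add(32 * 24 * time.Hour)        // resigning happens well before this
	fmt.Println("inception: ", inception.Format(time.RFC3339))
	fmt.Println("expiration:", expiration.Format(time.RFC3339))
}
~~~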
in "/tmp/db.example.org.signed" with serial 1564766865 +~~~ + +Or use a single zone file for *multiple* zones, note that the **ZONES** are repeated for both plugins. +Also note this outputs *multiple* signed output files. Here we use the default output directory +`/var/lib/coredns`. + +~~~ txt +. { + file /var/lib/coredns/db.example.org.signed example.org + file /var/lib/coredns/db.example.net.signed example.net + sign db.example.org example.org example.net { + key directory /etc/coredns/keys + } +} +~~~ + +This is the same configuration, but the zones are put in the server block, but note that you still +need to specify what file is served for what zone in the *file* plugin: + +~~~ txt +example.org example.net { + file var/lib/coredns/db.example.org.signed example.org + file var/lib/coredns/db.example.net.signed example.net + sign db.example.org { + key directory /etc/coredns/keys + } +} +~~~ + +Be careful to fully list the origins you want to sign, if you don't: + +~~~ txt +example.org example.net { + sign plugin/sign/testdata/db.example.org miek.org { + key file /etc/coredns/keys/Kexample.org + } +} +~~~ + +This will lead to `db.example.org` be signed *twice*, as this entire section is parsed twice because +you have specified the origins `example.org` and `example.net` in the server block. + +Forcibly resigning a zone can be accomplished by removing the signed zone file (CoreDNS will keep +on serving it from memory), and sending SIGUSR1 to the process to make it reload and resign the zone +file. + +## Also See + +The DNSSEC RFCs: RFC 4033, RFC 4034 and RFC 4035. And the BCP on DNSSEC, RFC 6781. Further more the +manual pages coredns-keygen(1) and dnssec-keygen(8). And the *file* plugin's documentation. + +Coredns-keygen can be found at +[https://github.com/coredns/coredns-utils](https://github.com/coredns/coredns-utils) in the +coredns-keygen directory. + +Other useful DNSSEC tools can be found in [ldns](https://nlnetlabs.nl/projects/ldns/about/), e.g. +`ldns-key2ds` to create DS records from DNSKEYs. + +## Bugs + +`keys directory` is not implemented. diff --git a/plugin/sign/dnssec.go b/plugin/sign/dnssec.go new file mode 100644 index 00000000000..a95e08644b4 --- /dev/null +++ b/plugin/sign/dnssec.go @@ -0,0 +1,20 @@ +package sign + +import ( + "github.com/miekg/dns" +) + +func (p Pair) signRRs(rrs []dns.RR, signerName string, ttl, incep, expir uint32) (*dns.RRSIG, error) { + rrsig := &dns.RRSIG{ + Hdr: dns.RR_Header{Rrtype: dns.TypeRRSIG, Ttl: ttl}, + Algorithm: p.Public.Algorithm, + SignerName: signerName, + KeyTag: p.KeyTag, + OrigTtl: ttl, + Inception: incep, + Expiration: expir, + } + + e := rrsig.Sign(p.Private, rrs) + return rrsig, e +} diff --git a/plugin/sign/file.go b/plugin/sign/file.go new file mode 100644 index 00000000000..b1190126df2 --- /dev/null +++ b/plugin/sign/file.go @@ -0,0 +1,93 @@ +package sign + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/file/tree" + + "github.com/miekg/dns" +) + +// write writes out the zone file to a temporary file which is then moved into the correct place. 
+func (s *Signer) write(z *file.Zone) error { + f, err := ioutil.TempFile(s.directory, "signed-") + if err != nil { + return err + } + + if err := write(f, z); err != nil { + f.Close() + return err + } + + f.Close() + return os.Rename(f.Name(), filepath.Join(s.directory, s.signedfile)) +} + +func write(w io.Writer, z *file.Zone) error { + if _, err := io.WriteString(w, z.Apex.SOA.String()); err != nil { + return err + } + w.Write([]byte("\n")) // RR Stringer() method doesn't include newline, which ends the RR in a zone file, write that here. + for _, rr := range z.Apex.SIGSOA { + io.WriteString(w, rr.String()) + w.Write([]byte("\n")) + } + for _, rr := range z.Apex.NS { + io.WriteString(w, rr.String()) + w.Write([]byte("\n")) + } + for _, rr := range z.Apex.SIGNS { + io.WriteString(w, rr.String()) + w.Write([]byte("\n")) + } + err := z.Walk(func(e *tree.Elem, _ map[uint16][]dns.RR) error { + for _, r := range e.All() { + io.WriteString(w, r.String()) + w.Write([]byte("\n")) + } + return nil + }) + return err +} + +// Parse parses the zone in filename and returns a new Zone or an error. This +// is similar to the Parse function in the *file* plugin. However when parsing +// the record types DNSKEY, RRSIG, CDNSKEY and CDS are *not* included in the returned +// zone (if encountered). +func Parse(f io.Reader, origin, fileName string) (*file.Zone, error) { + zp := dns.NewZoneParser(f, dns.Fqdn(origin), fileName) + zp.SetIncludeAllowed(true) + z := file.NewZone(origin, fileName) + seenSOA := false + + for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { + if err := zp.Err(); err != nil { + return nil, err + } + + switch rr.(type) { + case *dns.DNSKEY, *dns.RRSIG, *dns.CDNSKEY, *dns.CDS: + continue + case *dns.SOA: + seenSOA = true + if err := z.Insert(rr); err != nil { + return nil, err + } + default: + if err := z.Insert(rr); err != nil { + return nil, err + } + } + } + if !seenSOA { + return nil, fmt.Errorf("file %q has no SOA record", fileName) + } + + return z, nil +} diff --git a/plugin/sign/file_test.go b/plugin/sign/file_test.go new file mode 100644 index 00000000000..72d2b02ac04 --- /dev/null +++ b/plugin/sign/file_test.go @@ -0,0 +1,43 @@ +package sign + +import ( + "os" + "testing" + + "github.com/miekg/dns" +) + +func TestFileParse(t *testing.T) { + f, err := os.Open("testdata/db.miek.nl") + if err != nil { + t.Fatal(err) + } + z, err := Parse(f, "miek.nl.", "testdata/db.miek.nl") + if err != nil { + t.Fatal(err) + } + s := &Signer{ + directory: ".", + signedfile: "db.miek.nl.test", + } + + s.write(z) + defer os.Remove("db.miek.nl.test") + + f, err = os.Open("db.miek.nl.test") + if err != nil { + t.Fatal(err) + } + z, err = Parse(f, "miek.nl.", "db.miek.nl.test") + if err != nil { + t.Fatal(err) + } + if x := z.Apex.SOA.Header().Name; x != "miek.nl." 
{ + t.Errorf("Expected SOA name to be %s, got %s", x, "miek.nl.") + } + apex, _ := z.Search("miek.nl.") + key := apex.Type(dns.TypeDNSKEY) + if key != nil { + t.Errorf("Expected no DNSKEYs, but got %d", len(key)) + } +} diff --git a/plugin/sign/keys.go b/plugin/sign/keys.go new file mode 100644 index 00000000000..03065e8b434 --- /dev/null +++ b/plugin/sign/keys.go @@ -0,0 +1,119 @@ +package sign + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/coredns/coredns/core/dnsserver" + + "github.com/caddyserver/caddy" + "github.com/miekg/dns" + "golang.org/x/crypto/ed25519" +) + +// Pair holds DNSSEC key information, both the public and private components are stored here. +type Pair struct { + Public *dns.DNSKEY + KeyTag uint16 + Private crypto.Signer +} + +// keyParse reads the public and private key from disk. +func keyParse(c *caddy.Controller) ([]Pair, error) { + if !c.NextArg() { + return nil, c.ArgErr() + } + pairs := []Pair{} + config := dnsserver.GetConfig(c) + + switch c.Val() { + case "file": + ks := c.RemainingArgs() + if len(ks) == 0 { + return nil, c.ArgErr() + } + for _, k := range ks { + base := k + // Kmiek.nl.+013+26205.key, handle .private or without extension: Kmiek.nl.+013+26205 + if strings.HasSuffix(k, ".key") { + base = k[:len(k)-4] + } + if strings.HasSuffix(k, ".private") { + base = k[:len(k)-8] + } + if !filepath.IsAbs(base) && config.Root != "" { + base = filepath.Join(config.Root, base) + } + + pair, err := readKeyPair(base+".key", base+".private") + if err != nil { + return nil, err + } + pairs = append(pairs, pair) + } + case "directory": + return nil, fmt.Errorf("directory: not implemented") + } + + return pairs, nil +} + +func readKeyPair(public, private string) (Pair, error) { + rk, err := os.Open(public) + if err != nil { + return Pair{}, err + } + b, err := ioutil.ReadAll(rk) + if err != nil { + return Pair{}, err + } + dnskey, err := dns.NewRR(string(b)) + if err != nil { + return Pair{}, err + } + if _, ok := dnskey.(*dns.DNSKEY); !ok { + return Pair{}, fmt.Errorf("RR in %q is not a DNSKEY: %d", public, dnskey.Header().Rrtype) + } + ksk := dnskey.(*dns.DNSKEY).Flags&(1<<8) == (1<<8) && dnskey.(*dns.DNSKEY).Flags&1 == 1 + if !ksk { + return Pair{}, fmt.Errorf("DNSKEY in %q is not a CSK/KSK", public) + } + + rp, err := os.Open(private) + if err != nil { + return Pair{}, err + } + privkey, err := dnskey.(*dns.DNSKEY).ReadPrivateKey(rp, private) + if err != nil { + return Pair{}, err + } + switch signer := privkey.(type) { + case *ecdsa.PrivateKey: + return Pair{Public: dnskey.(*dns.DNSKEY), KeyTag: dnskey.(*dns.DNSKEY).KeyTag(), Private: signer}, nil + case ed25519.PrivateKey: + return Pair{Public: dnskey.(*dns.DNSKEY), KeyTag: dnskey.(*dns.DNSKEY).KeyTag(), Private: signer}, nil + case *rsa.PrivateKey: + return Pair{Public: dnskey.(*dns.DNSKEY), KeyTag: dnskey.(*dns.DNSKEY).KeyTag(), Private: signer}, nil + default: + return Pair{}, fmt.Errorf("unsupported algorithm %s", signer) + } +} + +// keyTag returns the key tags of the keys in ps as a formatted string. 
+func keyTag(ps []Pair) string { + if len(ps) == 0 { + return "" + } + s := "" + for _, p := range ps { + s += strconv.Itoa(int(p.KeyTag)) + "," + } + return s[:len(s)-1] +} diff --git a/plugin/sign/log_test.go b/plugin/sign/log_test.go new file mode 100644 index 00000000000..2726cd179f0 --- /dev/null +++ b/plugin/sign/log_test.go @@ -0,0 +1,5 @@ +package sign + +import clog "github.com/coredns/coredns/plugin/pkg/log" + +func init() { clog.Discard() } diff --git a/plugin/sign/nsec.go b/plugin/sign/nsec.go new file mode 100644 index 00000000000..d7c6a30a361 --- /dev/null +++ b/plugin/sign/nsec.go @@ -0,0 +1,36 @@ +package sign + +import ( + "sort" + + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/file/tree" + + "github.com/miekg/dns" +) + +// names returns the elements of the zone in nsec order. +func names(origin string, z *file.Zone) []string { + // There will also be apex records other than NS and SOA (who are kept separate), as we + // are adding DNSKEY and CDS/CDNSKEY records in the apex *before* we sign. + n := []string{} + z.AuthWalk(func(e *tree.Elem, _ map[uint16][]dns.RR, auth bool) error { + if !auth { + return nil + } + n = append(n, e.Name()) + return nil + }) + return n +} + +// NSEC returns an NSEC record according to name, next, ttl and bitmap. Note that the bitmap is sorted before use. +func NSEC(name, next string, ttl uint32, bitmap []uint16) *dns.NSEC { + sort.Slice(bitmap, func(i, j int) bool { return bitmap[i] < bitmap[j] }) + + return &dns.NSEC{ + Hdr: dns.RR_Header{Name: name, Ttl: ttl, Rrtype: dns.TypeNSEC, Class: dns.ClassINET}, + NextDomain: next, + TypeBitMap: bitmap, + } +} diff --git a/plugin/sign/nsec_test.go b/plugin/sign/nsec_test.go new file mode 100644 index 00000000000..f272651fc6a --- /dev/null +++ b/plugin/sign/nsec_test.go @@ -0,0 +1,27 @@ +package sign + +import ( + "os" + "testing" + + "github.com/coredns/coredns/plugin/file" +) + +func TestNames(t *testing.T) { + f, err := os.Open("testdata/db.miek.nl_ns") + if err != nil { + t.Error(err) + } + z, err := file.Parse(f, "db.miek.nl_ns", "miek.nl", 0) + if err != nil { + t.Error(err) + } + + names := names("miek.nl.", z) + expected := []string{"miek.nl.", "child.miek.nl.", "www.miek.nl."} + for i := range names { + if names[i] != expected[i] { + t.Errorf("Expected %s, got %s", expected[i], names[i]) + } + } +} diff --git a/plugin/sign/resign_test.go b/plugin/sign/resign_test.go new file mode 100644 index 00000000000..2f67f52aa78 --- /dev/null +++ b/plugin/sign/resign_test.go @@ -0,0 +1,40 @@ +package sign + +import ( + "strings" + "testing" + "time" +) + +func TestResignInception(t *testing.T) { + then := time.Date(2019, 7, 18, 22, 50, 0, 0, time.UTC) + // signed yesterday + zr := strings.NewReader(`miek.nl. 1800 IN RRSIG SOA 13 2 1800 20190808191936 20190717161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x != nil { + t.Errorf("Expected RRSIG to be valid for %s, got invalid: %s", then.Format(timeFmt), x) + } + // inception starts after this date. + zr = strings.NewReader(`miek.nl. 1800 IN RRSIG SOA 13 2 1800 20190808191936 20190731161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x == nil { + t.Errorf("Expected RRSIG to be invalid for %s, got valid", then.Format(timeFmt)) + } +} + +func TestResignExpire(t *testing.T) { + then := time.Date(2019, 7, 18, 22, 50, 0, 0, time.UTC) + // expires tomorrow + zr := strings.NewReader(`miek.nl. 
1800 IN RRSIG SOA 13 2 1800 20190717191936 20190717161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x == nil { + t.Errorf("Expected RRSIG to be invalid for %s, got valid", then.Format(timeFmt)) + } + // expire too far away + zr = strings.NewReader(`miek.nl. 1800 IN RRSIG SOA 13 2 1800 20190731191936 20190717161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x != nil { + t.Errorf("Expected RRSIG to be valid for %s, got invalid: %s", then.Format(timeFmt), x) + } + // expired yesterday + zr = strings.NewReader(`miek.nl. 1800 IN RRSIG SOA 13 2 1800 20190721191936 20190717161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x == nil { + t.Errorf("Expected RRSIG to be invalid for %s, got valid", then.Format(timeFmt)) + } +} diff --git a/plugin/sign/setup.go b/plugin/sign/setup.go new file mode 100644 index 00000000000..8f4d4abd5c2 --- /dev/null +++ b/plugin/sign/setup.go @@ -0,0 +1,109 @@ +package sign + +import ( + "fmt" + "math/rand" + "path/filepath" + "time" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + + "github.com/caddyserver/caddy" +) + +func init() { plugin.Register("sign", setup) } + +func setup(c *caddy.Controller) error { + sign, err := parse(c) + if err != nil { + return plugin.Error("sign", err) + } + + c.OnStartup(sign.OnStartup) + c.OnStartup(func() error { + for _, signer := range sign.signers { + go signer.refresh(DurationRefreshHours) + } + return nil + }) + c.OnShutdown(func() error { + for _, signer := range sign.signers { + close(signer.stop) + } + return nil + }) + + // Don't call AddPlugin, *sign* is not a plugin. + return nil +} + +func parse(c *caddy.Controller) (*Sign, error) { + sign := &Sign{} + config := dnsserver.GetConfig(c) + + for c.Next() { + if !c.NextArg() { + return nil, c.ArgErr() + } + dbfile := c.Val() + if !filepath.IsAbs(dbfile) && config.Root != "" { + dbfile = filepath.Join(config.Root, dbfile) + } + + origins := make([]string, len(c.ServerBlockKeys)) + copy(origins, c.ServerBlockKeys) + args := c.RemainingArgs() + if len(args) > 0 { + origins = args + } + for i := range origins { + origins[i] = plugin.Host(origins[i]).Normalize() + } + + signers := make([]*Signer, len(origins)) + for i := range origins { + signers[i] = &Signer{ + dbfile: dbfile, + origin: plugin.Host(origins[i]).Normalize(), + jitter: time.Duration(float32(DurationJitter) * rand.Float32()), + directory: "/var/lib/coredns", + stop: make(chan struct{}), + signedfile: fmt.Sprintf("db.%ssigned", origins[i]), // origins[i] is a fqdn, so it ends with a dot, hence %ssigned. + } + } + + for c.NextBlock() { + switch c.Val() { + case "key": + pairs, err := keyParse(c) + if err != nil { + return sign, err + } + for i := range signers { + for _, p := range pairs { + p.Public.Header().Name = signers[i].origin + } + signers[i].keys = append(signers[i].keys, pairs...) + } + case "directory": + dir := c.RemainingArgs() + if len(dir) == 0 || len(dir) > 1 { + return sign, fmt.Errorf("can only be one argument after %q", "directory") + } + if !filepath.IsAbs(dir[0]) && config.Root != "" { + dir[0] = filepath.Join(config.Root, dir[0]) + } + for i := range signers { + signers[i].directory = dir[0] + signers[i].signedfile = fmt.Sprintf("db.%ssigned", signers[i].origin) + } + default: + return nil, c.Errf("unknown property '%s'", c.Val()) + } + } + sign.signers = append(sign.signers, signers...) 
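+		// Note: each `sign` stanza appends its signers here, so one Corefile can drive
+		// several independently jittered zones from a single plugin instance.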
+ } + + return sign, nil +} diff --git a/plugin/sign/setup_test.go b/plugin/sign/setup_test.go new file mode 100644 index 00000000000..ce25720a0cb --- /dev/null +++ b/plugin/sign/setup_test.go @@ -0,0 +1,75 @@ +package sign + +import ( + "testing" + + "github.com/caddyserver/caddy" +) + +func TestParse(t *testing.T) { + tests := []struct { + input string + shouldErr bool + exp *Signer + }{ + {`sign testdata/db.miek.nl miek.nl { + key file testdata/Kmiek.nl.+013+59725 + }`, + false, + &Signer{ + keys: []Pair{}, + origin: "miek.nl.", + dbfile: "testdata/db.miek.nl", + directory: "/var/lib/coredns", + signedfile: "db.miek.nl.signed", + }, + }, + {`sign testdata/db.miek.nl example.org { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }`, + false, + &Signer{ + keys: []Pair{}, + origin: "example.org.", + dbfile: "testdata/db.miek.nl", + directory: "testdata", + signedfile: "db.example.org.signed", + }, + }, + // errors + {`sign db.example.org { + key file /etc/coredns/keys/Kexample.org + }`, + true, + nil, + }, + } + for i, tc := range tests { + c := caddy.NewTestController("dns", tc.input) + sign, err := parse(c) + + if err == nil && tc.shouldErr { + t.Fatalf("Test %d expected errors, but got no error", i) + } + if err != nil && !tc.shouldErr { + t.Fatalf("Test %d expected no errors, but got '%v'", i, err) + } + if tc.shouldErr { + continue + } + signer := sign.signers[0] + if x := signer.origin; x != tc.exp.origin { + t.Errorf("Test %d expected %s as origin, got %s", i, tc.exp.origin, x) + } + if x := signer.dbfile; x != tc.exp.dbfile { + t.Errorf("Test %d expected %s as dbfile, got %s", i, tc.exp.dbfile, x) + } + if x := signer.directory; x != tc.exp.directory { + t.Errorf("Test %d expected %s as directory, got %s", i, tc.exp.directory, x) + } + if x := signer.signedfile; x != tc.exp.signedfile { + t.Errorf("Test %d expected %s as signedfile, got %s", i, tc.exp.signedfile, x) + } + } +} diff --git a/plugin/sign/sign.go b/plugin/sign/sign.go new file mode 100644 index 00000000000..f5eb9afacd8 --- /dev/null +++ b/plugin/sign/sign.go @@ -0,0 +1,37 @@ +// Package sign implements a zone signer as a plugin. +package sign + +import ( + "path/filepath" + "time" +) + +// Sign contains signers that sign the zone files. +type Sign struct { + signers []*Signer +} + +// OnStartup scans all signers and signs or resigns zones if needed. +func (s *Sign) OnStartup() error { + for _, signer := range s.signers { + why := signer.resign() + if why == nil { + log.Infof("Skipping signing zone %q in %q: signatures are valid", signer.origin, filepath.Join(signer.directory, signer.signedfile)) + continue + } + go signAndLog(signer, why) + } + return nil +} + +// Various duration constants for signing of the zones.
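+// With the defaults below, a zone signed at time T gets RRSIG inception T-3h minus a jitter
+// drawn from [0, 18h), and expiration T+32d; it is resigned once the inception is more than
+// 6 days in the past, or when fewer than 7 days of signature validity remain. Zones are
+// checked every 5 hours.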
+const ( + DurationExpireDays = 7 * 24 * time.Hour // max time allowed before expiration + DurationResignDays = 6 * 24 * time.Hour // if the last sign happened this long ago, sign again + DurationSignatureExpireDays = 32 * 24 * time.Hour // sign for 32 days + DurationRefreshHours = 5 * time.Hour // check zones every 5 hours + DurationJitter = -18 * time.Hour // default max jitter + DurationSignatureInceptionHours = -3 * time.Hour // -(2+1) hours, be sure to catch daylight saving time and such, jitter is subtracted +) + +const timeFmt = "2006-01-02T15:04:05.000Z07:00" diff --git a/plugin/sign/signer.go b/plugin/sign/signer.go new file mode 100644 index 00000000000..b0e2f02fef4 --- /dev/null +++ b/plugin/sign/signer.go @@ -0,0 +1,209 @@ +package sign + +import ( + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/file/tree" + clog "github.com/coredns/coredns/plugin/pkg/log" + + "github.com/miekg/dns" +) + +var log = clog.NewWithPlugin("sign") + +// Signer holds the data needed to sign a zone file. +type Signer struct { + keys []Pair + origin string + dbfile string + directory string + jitter time.Duration + + signedfile string + stop chan struct{} +} + +// Sign signs a zone file according to the parameters in s. +func (s *Signer) Sign(now time.Time) (*file.Zone, error) { + rd, err := os.Open(s.dbfile) + if err != nil { + return nil, err + } + + z, err := Parse(rd, s.origin, s.dbfile) + if err != nil { + return nil, err + } + + mttl := z.Apex.SOA.Minttl + ttl := z.Apex.SOA.Header().Ttl + inception, expiration := lifetime(now, s.jitter) + z.Apex.SOA.Serial = uint32(now.Unix()) + + for _, pair := range s.keys { + pair.Public.Header().Ttl = ttl // set TTL on key so it matches the RRSIG. + z.Insert(pair.Public) + z.Insert(pair.Public.ToDS(dns.SHA1).ToCDS()) + z.Insert(pair.Public.ToDS(dns.SHA256).ToCDS()) + z.Insert(pair.Public.ToCDNSKEY()) + } + + names := names(s.origin, z) + ln := len(names) + + for _, pair := range s.keys { + rrsig, err := pair.signRRs([]dns.RR{z.Apex.SOA}, s.origin, ttl, inception, expiration) + if err != nil { + return nil, err + } + z.Insert(rrsig) + // NS apex may not be set if RRs have been discarded because the origin doesn't match. + if len(z.Apex.NS) > 0 { + rrsig, err = pair.signRRs(z.Apex.NS, s.origin, ttl, inception, expiration) + if err != nil { + return nil, err + } + z.Insert(rrsig) + } + } + + // We are walking the tree in the same direction, so names[] can be used here to indicate the next element. + i := 1 + err = z.AuthWalk(func(e *tree.Elem, zrrs map[uint16][]dns.RR, auth bool) error { + if !auth { + return nil + } + + if e.Name() == s.origin { + nsec := NSEC(e.Name(), names[(ln+i)%ln], mttl, append(e.Types(), dns.TypeNS, dns.TypeSOA, dns.TypeRRSIG, dns.TypeNSEC)) + z.Insert(nsec) + } else { + nsec := NSEC(e.Name(), names[(ln+i)%ln], mttl, append(e.Types(), dns.TypeRRSIG, dns.TypeNSEC)) + z.Insert(nsec) + } + + for t, rrs := range zrrs { + // RRSIGs are not signed and NS records are not signed because we are never authoritative for them. + // The zone's apex nameserver records are not kept in this tree and are signed separately.
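+			// Glue records below delegations are likewise skipped: AuthWalk reports them with
+			// auth=false and we return early above, so they end up in the signed zone unsigned.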
+ if t == dns.TypeRRSIG || t == dns.TypeNS { + continue + } + for _, pair := range s.keys { + rrsig, err := pair.signRRs(rrs, s.origin, rrs[0].Header().Ttl, inception, expiration) + if err != nil { + return err + } + e.Insert(rrsig) + } + } + i++ + return nil + }) + return z, err +} + +// resign checks if the signed zone exists, or needs resigning. +func (s *Signer) resign() error { + signedfile := filepath.Join(s.directory, s.signedfile) + rd, err := os.Open(signedfile) + if err != nil { // a missing (or unreadable) signed file means we must sign (again) + return err + } + defer rd.Close() + + now := time.Now().UTC() + return resign(rd, now) +} + +// resign will scan rd and check the signature on the SOA record. We will resign on the basis +// of 2 conditions: +// * either the inception is more than 6 days ago, or +// * we only have 1 week left on the signature +// +// All SOA signatures will be checked. If the SOA isn't found in the first 100 +// records, we will resign the zone. +func resign(rd io.Reader, now time.Time) (why error) { + zp := dns.NewZoneParser(rd, ".", "resign") + zp.SetIncludeAllowed(true) + i := 0 + + for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { + if err := zp.Err(); err != nil { + return err + } + + switch x := rr.(type) { + case *dns.RRSIG: + if x.TypeCovered != dns.TypeSOA { + continue + } + incep, _ := time.Parse("20060102150405", dns.TimeToString(x.Inception)) + // If too long ago, resign. + if now.Sub(incep) >= 0 && now.Sub(incep) > DurationResignDays { + return fmt.Errorf("inception %q was more than: %s ago from %s: %s", incep.Format(timeFmt), DurationResignDays, now.Format(timeFmt), now.Sub(incep)) + } + // Inception hasn't even started yet. + if now.Sub(incep) < 0 { + return fmt.Errorf("inception %q date is in the future: %s", incep.Format(timeFmt), now.Sub(incep)) + } + + expire, _ := time.Parse("20060102150405", dns.TimeToString(x.Expiration)) + if expire.Sub(now) < DurationExpireDays { + return fmt.Errorf("expiration %q is less than: %s away from %s: %s", expire.Format(timeFmt), DurationExpireDays, now.Format(timeFmt), expire.Sub(now)) + } + } + i++ + if i > 100 { + // 100 is a random number. A SOA record should be the first in the zonefile, but RFC 1035 doesn't actually mandate this. So it could + // be 3rd or even later. The number 100 looks crazy high enough that it will catch all weird zones, but not high enough to keep the CPU + // busy with parsing all the time. + return fmt.Errorf("no SOA RRSIG found in first 100 records") + } + } + + return nil +} + +func signAndLog(s *Signer, why error) { + now := time.Now().UTC() + z, err := s.Sign(now) + log.Infof("Signing %q because %s", s.origin, why) + if err != nil { + log.Warningf("Error signing %q with key tags %q in %s: %s, next: %s", s.origin, keyTag(s.keys), time.Since(now), err, now.Add(DurationRefreshHours).Format(timeFmt)) + return + } + + if err := s.write(z); err != nil { + log.Warningf("Error signing %q: failed to move zone file into place: %s", s.origin, err) + return + } + log.Infof("Successfully signed zone %q in %q with key tags %q and %d SOA serial, elapsed %f, next: %s", s.origin, filepath.Join(s.directory, s.signedfile), keyTag(s.keys), z.Apex.SOA.Serial, time.Since(now).Seconds(), now.Add(DurationRefreshHours).Format(timeFmt)) +} + +// refresh checks every val whether the zone needs to be resigned.
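+// It stops when the signer's stop channel is closed during plugin shutdown.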
+func (s *Signer) refresh(val time.Duration) { + tick := time.NewTicker(val) + defer tick.Stop() + for { + select { + case <-s.stop: + return + case <-tick.C: + why := s.resign() + if why == nil { + continue + } + signAndLog(s, why) + } + } +} + +func lifetime(now time.Time, jitter time.Duration) (uint32, uint32) { + incep := uint32(now.Add(DurationSignatureInceptionHours).Add(jitter).Unix()) + expir := uint32(now.Add(DurationSignatureExpireDays).Unix()) + return incep, expir +} diff --git a/plugin/sign/signer_test.go b/plugin/sign/signer_test.go new file mode 100644 index 00000000000..bb364728ec8 --- /dev/null +++ b/plugin/sign/signer_test.go @@ -0,0 +1,182 @@ +package sign + +import ( + "bytes" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/caddyserver/caddy" + "github.com/miekg/dns" +) + +func TestSign(t *testing.T) { + input := `sign testdata/db.miek.nl miek.nl { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }` + c := caddy.NewTestController("dns", input) + sign, err := parse(c) + if err != nil { + t.Fatal(err) + } + if len(sign.signers) != 1 { + t.Fatalf("Expected 1 signer, got %d", len(sign.signers)) + } + z, err := sign.signers[0].Sign(time.Now().UTC()) + if err != nil { + t.Error(err) + } + + apex, _ := z.Search("miek.nl.") + if x := apex.Type(dns.TypeDS); len(x) != 0 { + t.Errorf("Expected %d DS records, got %d", 0, len(x)) + } + if x := apex.Type(dns.TypeCDS); len(x) != 2 { + t.Errorf("Expected %d CDS records, got %d", 2, len(x)) + } + if x := apex.Type(dns.TypeCDNSKEY); len(x) != 1 { + t.Errorf("Expected %d CDNSKEY record, got %d", 1, len(x)) + } + if x := apex.Type(dns.TypeDNSKEY); len(x) != 1 { + t.Errorf("Expected %d DNSKEY record, got %d", 1, len(x)) + } +} + +func TestSignApexZone(t *testing.T) { + apex := `$TTL 30M +$ORIGIN example.org. +@ IN SOA linode miek.miek.nl. ( 1282630060 4H 1H 7D 4H ) + IN NS linode +` + if err := ioutil.WriteFile("db.apex-test.example.org", []byte(apex), 0644); err != nil { + t.Fatal(err) + } + defer os.Remove("db.apex-test.example.org") + input := `sign db.apex-test.example.org example.org { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }` + c := caddy.NewTestController("dns", input) + sign, err := parse(c) + if err != nil { + t.Fatal(err) + } + z, err := sign.signers[0].Sign(time.Now().UTC()) + if err != nil { + t.Error(err) + } + + el, _ := z.Search("example.org.") + nsec := el.Type(dns.TypeNSEC) + if len(nsec) != 1 { + t.Errorf("Expected 1 NSEC for %s, got %d", "example.org.", len(nsec)) + } + if x := nsec[0].(*dns.NSEC).NextDomain; x != "example.org." 
{ + t.Errorf("Expected NSEC NextDomain %s, got %s", "example.org.", x) + } + if x := nsec[0].(*dns.NSEC).TypeBitMap; len(x) != 7 { + t.Errorf("Expected NSEC bitmap to be %d elements, got %d", 7, x) + } + if x := nsec[0].(*dns.NSEC).TypeBitMap; x[6] != dns.TypeCDNSKEY { + t.Errorf("Expected NSEC bitmap element 5 to be %d, got %d", dns.TypeCDNSKEY, x[6]) + } + if x := nsec[0].(*dns.NSEC).TypeBitMap; x[4] != dns.TypeDNSKEY { + t.Errorf("Expected NSEC bitmap element 4 to be %d, got %d", dns.TypeDNSKEY, x[4]) + } + dnskey := el.Type(dns.TypeDNSKEY) + if x := dnskey[0].Header().Ttl; x != 1800 { + t.Errorf("Expected DNSKEY TTL to be %d, got %d", 1800, x) + } + sigs := el.Type(dns.TypeRRSIG) + for _, s := range sigs { + if s.(*dns.RRSIG).TypeCovered == dns.TypeDNSKEY { + if s.(*dns.RRSIG).OrigTtl != dnskey[0].Header().Ttl { + t.Errorf("Expected RRSIG original TTL to match DNSKEY TTL, but %d != %d", s.(*dns.RRSIG).OrigTtl, dnskey[0].Header().Ttl) + } + if s.(*dns.RRSIG).SignerName != dnskey[0].Header().Name { + t.Errorf("Expected RRSIG signer name to match DNSKEY ownername, but %s != %s", s.(*dns.RRSIG).SignerName, dnskey[0].Header().Name) + } + } + } +} + +func TestSignGlue(t *testing.T) { + input := `sign testdata/db.miek.nl miek.nl { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }` + c := caddy.NewTestController("dns", input) + sign, err := parse(c) + if err != nil { + t.Fatal(err) + } + if len(sign.signers) != 1 { + t.Fatalf("Expected 1 signer, got %d", len(sign.signers)) + } + z, err := sign.signers[0].Sign(time.Now().UTC()) + if err != nil { + t.Error(err) + } + + e, _ := z.Search("ns2.bla.miek.nl.") + sigs := e.Type(dns.TypeRRSIG) + if len(sigs) != 0 { + t.Errorf("Expected no RRSIG for %s, got %d", "ns2.bla.miek.nl.", len(sigs)) + } +} + +func TestSignDS(t *testing.T) { + input := `sign testdata/db.miek.nl_ns miek.nl { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }` + c := caddy.NewTestController("dns", input) + sign, err := parse(c) + if err != nil { + t.Fatal(err) + } + if len(sign.signers) != 1 { + t.Fatalf("Expected 1 signer, got %d", len(sign.signers)) + } + z, err := sign.signers[0].Sign(time.Now().UTC()) + if err != nil { + t.Error(err) + } + + // dnssec-signzone outputs this for db.miek.nl_ns: + // + // child.miek.nl. 1800 IN NS ns.child.miek.nl. + // child.miek.nl. 1800 IN DS 34385 13 2 fc7397c77afbccb6742fc.... + // child.miek.nl. 1800 IN RRSIG DS 13 3 1800 20191223121229 20191123121229 59725 miek.nl. ZwptLzVVs.... + // child.miek.nl. 14400 IN NSEC www.miek.nl. NS DS RRSIG NSEC + // child.miek.nl. 14400 IN RRSIG NSEC 13 3 14400 20191223121229 20191123121229 59725 miek.nl. w+CcA8... + + name := "child.miek.nl." + e, _ := z.Search(name) + if x := len(e.Types()); x != 4 { // NS DS NSEC and 2x RRSIG + t.Errorf("Expected 4 records for %s, got %d", name, x) + } + + ds := e.Type(dns.TypeDS) + if len(ds) != 1 { + t.Errorf("Expected DS for %s, got %d", name, len(ds)) + } + sigs := e.Type(dns.TypeRRSIG) + if len(sigs) != 2 { + t.Errorf("Expected no RRSIG for %s, got %d", name, len(sigs)) + } + nsec := e.Type(dns.TypeNSEC) + if x := nsec[0].(*dns.NSEC).NextDomain; x != "www.miek.nl." 
{ + t.Errorf("Expected no NSEC NextDomain to be %s for %s, got %s", "www.miek.nl.", name, x) + } + minttl := z.Apex.SOA.Minttl + if x := nsec[0].Header().Ttl; x != minttl { + t.Errorf("Expected no NSEC TTL to be %d for %s, got %d", minttl, "www.miek.nl.", x) + } + // print zone on error + buf := &bytes.Buffer{} + write(buf, z) + t.Logf("%s\n", buf) +} diff --git a/plugin/sign/testdata/Kmiek.nl.+013+59725.key b/plugin/sign/testdata/Kmiek.nl.+013+59725.key new file mode 100644 index 00000000000..b3e3654e38f --- /dev/null +++ b/plugin/sign/testdata/Kmiek.nl.+013+59725.key @@ -0,0 +1,5 @@ +; This is a key-signing key, keyid 59725, for miek.nl. +; Created: 20190709192036 (Tue Jul 9 20:20:36 2019) +; Publish: 20190709192036 (Tue Jul 9 20:20:36 2019) +; Activate: 20190709192036 (Tue Jul 9 20:20:36 2019) +miek.nl. IN DNSKEY 257 3 13 sfzRg5nDVxbeUc51su4MzjgwpOpUwnuu81SlRHqJuXe3SOYOeypR69tZ 52XLmE56TAmPHsiB8Rgk+NTpf0o1Cw== diff --git a/plugin/sign/testdata/Kmiek.nl.+013+59725.private b/plugin/sign/testdata/Kmiek.nl.+013+59725.private new file mode 100644 index 00000000000..2545ed9a9ee --- /dev/null +++ b/plugin/sign/testdata/Kmiek.nl.+013+59725.private @@ -0,0 +1,6 @@ +Private-key-format: v1.3 +Algorithm: 13 (ECDSAP256SHA256) +PrivateKey: rm7EdHRca//6xKpJzeoLt/mrfgQnltJ0WpQGtOG59yo= +Created: 20190709192036 +Publish: 20190709192036 +Activate: 20190709192036 diff --git a/plugin/sign/testdata/db.miek.nl b/plugin/sign/testdata/db.miek.nl new file mode 100644 index 00000000000..4041b1b5ef4 --- /dev/null +++ b/plugin/sign/testdata/db.miek.nl @@ -0,0 +1,17 @@ +$TTL 30M +$ORIGIN miek.nl. +@ IN SOA linode.atoom.net. miek.miek.nl. ( 1282630060 4H 1H 7D 4H ) + IN NS linode.atoom.net. + IN MX 1 aspmx.l.google.com. + IN AAAA 2a01:7e00::f03c:91ff:fe79:234c + IN DNSKEY 257 3 13 sfzRg5nDVxbeUc51su4MzjgwpOpUwnuu81SlRHqJuXe3SOYOeypR69tZ52XLmE56TAmPHsiB8Rgk+NTpf0o1Cw== + +a IN AAAA 2a01:7e00::f03c:91ff:fe79:234c +www IN CNAME a + + +bla IN NS ns1.bla.com. +ns3.blaaat.miek.nl. IN AAAA ::1 ; non-glue, should be signed. +; in baliwick nameserver that requires glue, should not be signed +bla IN NS ns2.bla.miek.nl. +ns2.bla.miek.nl. IN A 127.0.0.1 diff --git a/plugin/sign/testdata/db.miek.nl_ns b/plugin/sign/testdata/db.miek.nl_ns new file mode 100644 index 00000000000..bd2371f1600 --- /dev/null +++ b/plugin/sign/testdata/db.miek.nl_ns @@ -0,0 +1,10 @@ +$TTL 30M +$ORIGIN miek.nl. +@ IN SOA linode.atoom.net. miek.miek.nl. ( 1282630060 4H 1H 7D 4H ) + NS linode.atoom.net. + DNSKEY 257 3 13 sfzRg5nDVxbeUc51su4MzjgwpOpUwnuu81SlRHqJuXe3SOYOeypR69tZ52XLmE56TAmPHsiB8Rgk+NTpf0o1Cw== + +www AAAA ::1 +child NS ns.child +ns.child AAAA ::1 +child DS 34385 13 2 fc7397c77afbccb6742fcff19c7b1410d0044661e7085fc200ae1ab3d15a5842 diff --git a/plugin/template/OWNERS b/plugin/template/OWNERS deleted file mode 100644 index b97eccf6238..00000000000 --- a/plugin/template/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - rtreffer -approvers: - - rtreffer diff --git a/plugin/template/README.md b/plugin/template/README.md index 64af37f9311..654de86c9f0 100644 --- a/plugin/template/README.md +++ b/plugin/template/README.md @@ -59,7 +59,7 @@ The output of the template must be a [RFC 1035](https://tools.ietf.org/html/rfc1 ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: * `coredns_template_matches_total{server, regex}` the total number of matched requests by regex. 
* `coredns_template_template_failures_total{server, regex,section,template}` the number of times the Go templating failed. Regex, section and template label values can be used to map the error back to the config file. diff --git a/plugin/template/setup.go b/plugin/template/setup.go index 045459ef1d4..908266f291d 100644 --- a/plugin/template/setup.go +++ b/plugin/template/setup.go @@ -12,12 +12,7 @@ import ( "github.com/miekg/dns" ) -func init() { - caddy.RegisterPlugin("template", caddy.Plugin{ - ServerType: "dns", - Action: setupTemplate, - }) -} +func init() { plugin.Register("template", setupTemplate) } func setupTemplate(c *caddy.Controller) error { handler, err := templateParse(c) diff --git a/plugin/template/template.go b/plugin/template/template.go index eaa291f7f79..f2a7b186fe7 100644 --- a/plugin/template/template.go +++ b/plugin/template/template.go @@ -73,7 +73,7 @@ func (h Handler) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) } for _, template := range h.Templates { - data, match, fthrough := template.match(ctx, state, zone) + data, match, fthrough := template.match(ctx, state) if !match { if !fthrough { return dns.RcodeNameError, nil @@ -143,11 +143,11 @@ func executeRRTemplate(server, section string, template *gotmpl.Template, data * return rr, nil } -func (t template) match(ctx context.Context, state request.Request, zone string) (*templateData, bool, bool) { +func (t template) match(ctx context.Context, state request.Request) (*templateData, bool, bool) { q := state.Req.Question[0] data := &templateData{md: metadata.ValueFuncs(ctx)} - zone = plugin.Zones(t.zones).Matches(state.Name()) + zone := plugin.Zones(t.zones).Matches(state.Name()) if zone == "" { return data, false, true } diff --git a/plugin/template/template_test.go b/plugin/template/template_test.go index 1aa26229fb2..7e8f988f2cc 100644 --- a/plugin/template/template_test.go +++ b/plugin/template/template_test.go @@ -421,7 +421,7 @@ func TestHandler(t *testing.T) { } } -// TestMultiSection verfies that a corefile with multiple but different template sections works +// TestMultiSection verifies that a corefile with multiple but different template sections works func TestMultiSection(t *testing.T) { ctx := context.TODO() diff --git a/plugin/test/responsewriter.go b/plugin/test/responsewriter.go index feaa8bd7e22..ce75657ce75 100644 --- a/plugin/test/responsewriter.go +++ b/plugin/test/responsewriter.go @@ -51,10 +51,10 @@ func (t *ResponseWriter) Close() error { return nil } func (t *ResponseWriter) TsigStatus() error { return nil } // TsigTimersOnly implement dns.ResponseWriter interface. -func (t *ResponseWriter) TsigTimersOnly(bool) { return } +func (t *ResponseWriter) TsigTimersOnly(bool) {} // Hijack implement dns.ResponseWriter interface. -func (t *ResponseWriter) Hijack() { return } +func (t *ResponseWriter) Hijack() {} // ResponseWriter6 returns fixed client and remote address in IPv6. The remote // address is always fe80::42:ff:feca:4c65 and port 40212. The local address is always ::1 and port 53. diff --git a/plugin/test/server.go b/plugin/test/server.go deleted file mode 100644 index eb39c7a5b9a..00000000000 --- a/plugin/test/server.go +++ /dev/null @@ -1,52 +0,0 @@ -package test - -import ( - "net" - "sync" - "time" - - "github.com/miekg/dns" -) - -// TCPServer starts a DNS server with a TCP listener on laddr. 
-func TCPServer(laddr string) (*dns.Server, string, error) { - l, err := net.Listen("tcp", laddr) - if err != nil { - return nil, "", err - } - - server := &dns.Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = func() { waitLock.Unlock() } - - go func() { - server.ActivateAndServe() - l.Close() - }() - - waitLock.Lock() - return server, l.Addr().String(), nil -} - -// UDPServer starts a DNS server with an UDP listener on laddr. -func UDPServer(laddr string) (*dns.Server, string, error) { - pc, err := net.ListenPacket("udp", laddr) - if err != nil { - return nil, "", err - } - server := &dns.Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = func() { waitLock.Unlock() } - - go func() { - server.ActivateAndServe() - pc.Close() - }() - - waitLock.Lock() - return server, pc.LocalAddr().String(), nil -} diff --git a/plugin/tls/OWNERS b/plugin/tls/OWNERS deleted file mode 100644 index f7f9ca271ae..00000000000 --- a/plugin/tls/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - johnbelamaric -approvers: - - johnbelamaric diff --git a/plugin/tls/README.md b/plugin/tls/README.md index 82d059adeec..40b395c6b8b 100644 --- a/plugin/tls/README.md +++ b/plugin/tls/README.md @@ -11,7 +11,7 @@ or are using gRPC (https://grpc.io/, not an IETF standard). Normally DNS traffic all (DNSSEC only signs resource records). The *tls* "plugin" allows you to configure the cryptographic keys that are needed for both -DNS-over-TLS and DNS-over-gRPC. If the `tls` directive is omitted, then no encryption takes place. +DNS-over-TLS and DNS-over-gRPC. If the *tls* plugin is omitted, then no encryption takes place. The gRPC protobuffer is defined in `pb/dns.proto`. It defines the proto as a simple wrapper for the wire data of a DNS message. 
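For illustration, a minimal DNS-over-TLS server block might look like the following sketch; the certificate and key paths and the zone name are placeholders, not prescribed values:

~~~ corefile
tls://example.org {
    tls /etc/coredns/cert.pem /etc/coredns/key.pem
    whoami
}
~~~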
diff --git a/plugin/tls/tls.go b/plugin/tls/tls.go index 3231e938176..bfde8cb2a67 100644 --- a/plugin/tls/tls.go +++ b/plugin/tls/tls.go @@ -10,12 +10,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("tls", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("tls", setup) } func setup(c *caddy.Controller) error { err := parseTLS(c) diff --git a/plugin/trace/OWNERS b/plugin/trace/OWNERS deleted file mode 100644 index f7f9ca271ae..00000000000 --- a/plugin/trace/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - johnbelamaric -approvers: - - johnbelamaric diff --git a/plugin/trace/setup.go b/plugin/trace/setup.go index 87d9e7e2440..64931933a4b 100644 --- a/plugin/trace/setup.go +++ b/plugin/trace/setup.go @@ -11,12 +11,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("trace", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("trace", setup) } func setup(c *caddy.Controller) error { t, err := traceParse(c) diff --git a/plugin/trace/trace.go b/plugin/trace/trace.go index 90b4632e828..5a95186958c 100644 --- a/plugin/trace/trace.go +++ b/plugin/trace/trace.go @@ -20,7 +20,7 @@ import ( "github.com/miekg/dns" ot "github.com/opentracing/opentracing-go" - zipkin "github.com/openzipkin/zipkin-go-opentracing" + zipkin "github.com/openzipkin-contrib/zipkin-go-opentracing" ) const ( diff --git a/plugin/trace/trace_test.go b/plugin/trace/trace_test.go index 48a4d7e78fa..7ea49a2554d 100644 --- a/plugin/trace/trace_test.go +++ b/plugin/trace/trace_test.go @@ -14,8 +14,6 @@ import ( "github.com/opentracing/opentracing-go/mocktracer" ) -const server = "coolServer" - func TestStartup(t *testing.T) { m, err := traceParse(caddy.NewTestController("dns", `trace`)) if err != nil { diff --git a/plugin/transfer/README.md b/plugin/transfer/README.md new file mode 100644 index 00000000000..45ec60c994e --- /dev/null +++ b/plugin/transfer/README.md @@ -0,0 +1,31 @@ +# transfer + +## Name + +*transfer* - perform zone transfers for other plugins. + +## Description + +This plugin answers zone transfers for authoritative plugins that implement +`transfer.Transferer`. + +Transfer answers full zone transfer (AXFR) requests and incremental zone transfer (IXFR) requests +with AXFR fallback if the zone has changed. + +DNS NOTIFY is not currently supported. + +## Syntax + +~~~ +transfer [ZONE...] { + to HOST... +} +~~~ + +* **ZONES** The zones *transfer* will answer zone transfer requests for. If left blank, + the zones are inherited from the enclosing server block. To answer zone + transfers for a given zone, there must be another plugin in the same server + block that serves the same zone, and implements `transfer.Transferer`. + +* `to` **HOST...** The hosts *transfer* will transfer to. Use `*` to permit + transfers to all hosts.
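For illustration, a sketch of a server block that permits transfers of `example.org` to a single secondary; the address is a placeholder, and an authoritative plugin implementing `transfer.Transferer` is assumed to be present in the same block:

~~~ corefile
example.org {
    # an authoritative plugin that implements transfer.Transferer goes here
    transfer example.org {
        to 10.1.2.3
    }
}
~~~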
diff --git a/plugin/transfer/setup.go b/plugin/transfer/setup.go new file mode 100644 index 00000000000..e83fd6d0b0d --- /dev/null +++ b/plugin/transfer/setup.go @@ -0,0 +1,102 @@ +package transfer + +import ( + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + parsepkg "github.com/coredns/coredns/plugin/pkg/parse" + "github.com/coredns/coredns/plugin/pkg/transport" + + "github.com/caddyserver/caddy" +) + +func init() { + caddy.RegisterPlugin("transfer", caddy.Plugin{ + ServerType: "dns", + Action: setup, + }) +} + +func setup(c *caddy.Controller) error { + t, err := parse(c) + + if err != nil { + return plugin.Error("transfer", err) + } + + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + t.Next = next + return t + }) + + c.OnStartup(func() error { + // find all plugins that implement Transferer and add them to Transferers + plugins := dnsserver.GetConfig(c).Handlers() + for _, pl := range plugins { + tr, ok := pl.(Transferer) + if !ok { + continue + } + t.Transferers = append(t.Transferers, tr) + } + return nil + }) + + return nil +} + +func parse(c *caddy.Controller) (*Transfer, error) { + + t := &Transfer{} + for c.Next() { + x := &xfr{} + zones := c.RemainingArgs() + + if len(zones) != 0 { + x.Zones = zones + for i := 0; i < len(x.Zones); i++ { + nzone, err := plugin.Host(x.Zones[i]).MustNormalize() + if err != nil { + return nil, err + } + x.Zones[i] = nzone + } + } else { + x.Zones = make([]string, len(c.ServerBlockKeys)) + for i := 0; i < len(c.ServerBlockKeys); i++ { + nzone, err := plugin.Host(c.ServerBlockKeys[i]).MustNormalize() + if err != nil { + return nil, err + } + x.Zones[i] = nzone + } + } + + for c.NextBlock() { + switch c.Val() { + case "to": + args := c.RemainingArgs() + if len(args) == 0 { + return nil, c.ArgErr() + } + for _, host := range args { + if host == "*" { + x.to = append(x.to, host) + continue + } + normalized, err := parsepkg.HostPort(host, transport.Port) + if err != nil { + return nil, err + } + x.to = append(x.to, normalized) + } + default: + return nil, plugin.Error("transfer", c.Errf("unknown property '%s'", c.Val())) + } + } + if len(x.to) == 0 { + return nil, plugin.Error("transfer", c.Errf("'to' is required")) + } + t.xfrs = append(t.xfrs, x) + } + return t, nil +} diff --git a/plugin/transfer/setup_test.go b/plugin/transfer/setup_test.go new file mode 100644 index 00000000000..421910d46bc --- /dev/null +++ b/plugin/transfer/setup_test.go @@ -0,0 +1,85 @@ +package transfer + +import ( + "testing" + + "github.com/caddyserver/caddy" +) + +func TestParse(t *testing.T) { + tests := []struct { + input string + shouldErr bool + exp *Transfer + }{ + {`transfer example.net example.org { + to 1.2.3.4 5.6.7.8:1053 [1::2]:34 + } + transfer example.com example.edu { + to * 1.2.3.4 + }`, + false, + &Transfer{ + xfrs: []*xfr{{ + Zones: []string{"example.net.", "example.org."}, + to: []string{"1.2.3.4:53", "5.6.7.8:1053", "[1::2]:34"}, + }, { + Zones: []string{"example.com.", "example.edu."}, + to: []string{"*", "1.2.3.4:53"}, + }}, + }, + }, + // errors + {`transfer example.net example.org { + }`, + true, + nil, + }, + {`transfer example.net example.org { + invalid option + }`, + true, + nil, + }, + } + for i, tc := range tests { + c := caddy.NewTestController("dns", tc.input) + transfer, err := parse(c) + + if err == nil && tc.shouldErr { + t.Fatalf("Test %d expected errors, but got no error", i) + } + if err != nil && !tc.shouldErr {
t.Fatalf("Test %d expected no errors, but got '%v'", i, err) + } + if tc.shouldErr { + continue + } + + if len(tc.exp.xfrs) != len(transfer.xfrs) { + t.Fatalf("Test %d expected %d xfrs, got %d", i, len(tc.exp.xfrs), len(transfer.xfrs)) + } + for j, x := range transfer.xfrs { + // Check Zones + if len(tc.exp.xfrs[j].Zones) != len(x.Zones) { + t.Fatalf("Test %d expected %d zones, got %d", i, len(tc.exp.xfrs[i].Zones), len(x.Zones)) + } + for k, zone := range x.Zones { + if tc.exp.xfrs[j].Zones[k] != zone { + t.Errorf("Test %d expected zone %v, got %v", i, tc.exp.xfrs[j].Zones[k], zone) + + } + } + // Check to + if len(tc.exp.xfrs[j].to) != len(x.to) { + t.Fatalf("Test %d expected %d 'to' values, got %d", i, len(tc.exp.xfrs[i].to), len(x.to)) + } + for k, to := range x.to { + if tc.exp.xfrs[j].to[k] != to { + t.Errorf("Test %d expected %v in 'to', got %v", i, tc.exp.xfrs[j].to[k], to) + + } + } + } + } +} diff --git a/plugin/transfer/transfer.go b/plugin/transfer/transfer.go new file mode 100644 index 00000000000..0bf92ac47a2 --- /dev/null +++ b/plugin/transfer/transfer.go @@ -0,0 +1,181 @@ +package transfer + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + + "github.com/coredns/coredns/plugin" + clog "github.com/coredns/coredns/plugin/pkg/log" + "github.com/coredns/coredns/request" + + "github.com/miekg/dns" +) + +var log = clog.NewWithPlugin("transfer") + +// Transfer is a plugin that handles zone transfers. +type Transfer struct { + Transferers []Transferer // the list of plugins that implement Transferer + xfrs []*xfr + Next plugin.Handler // the next plugin in the chain +} + +type xfr struct { + Zones []string + to []string +} + +// Transferer may be implemented by plugins to enable zone transfers +type Transferer interface { + // Transfer returns a channel to which it writes responses to the transfer request. + // If the plugin is not authoritative for the zone, it should immediately return the + // Transfer.ErrNotAuthoritative error. + // + // If serial is 0, handle as an AXFR request. Transfer should send all records + // in the zone to the channel. The SOA should be written to the channel first, followed + // by all other records, including all NS + glue records. + // + // If serial is not 0, handle as an IXFR request. If the serial is equal to or greater (newer) than + // the current serial for the zone, send a single SOA record to the channel. + // If the serial is less (older) than the current serial for the zone, perform an AXFR fallback + // by proceeding as if an AXFR was requested (as above). + Transfer(zone string, serial uint32) (<-chan []dns.RR, error) +} + +var ( + // ErrNotAuthoritative is returned by Transfer() when the plugin is not authoritative for the zone + ErrNotAuthoritative = errors.New("not authoritative for zone") +) + +// ServeDNS implements the plugin.Handler interface. +func (t Transfer) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + if state.QType() != dns.TypeAXFR && state.QType() != dns.TypeIXFR { + return plugin.NextOrFailure(t.Name(), t.Next, ctx, w, r) + } + + // Find the first transfer instance for which the queried zone is a subdomain. + var x *xfr + for _, xfr := range t.xfrs { + zone := plugin.Zones(xfr.Zones).Matches(state.Name()) + if zone == "" { + continue + } + x = xfr + } + if x == nil { + // Requested zone did not match any transfer instance zones. 
+ // Pass request down chain in case later plugins are capable of handling transfer requests themselves. + return plugin.NextOrFailure(t.Name(), t.Next, ctx, w, r) + } + + if !x.allowed(state) { + return dns.RcodeRefused, nil + } + + // Get serial from request if this is an IXFR + var serial uint32 + if state.QType() == dns.TypeIXFR { + if len(r.Ns) == 0 { // malformed IXFR: no SOA in the authority section + return dns.RcodeServerFailure, nil + } + soa, ok := r.Ns[0].(*dns.SOA) + if !ok { + return dns.RcodeServerFailure, nil + } + serial = soa.Serial + } + + // Get a receiving channel from the first Transferer plugin that returns one + var fromPlugin <-chan []dns.RR + for _, p := range t.Transferers { + var err error + fromPlugin, err = p.Transfer(state.QName(), serial) + if err == ErrNotAuthoritative { + // plugin was not authoritative for the zone, try next plugin + continue + } + if err != nil { + return dns.RcodeServerFailure, err + } + break + } + + if fromPlugin == nil { + return plugin.NextOrFailure(t.Name(), t.Next, ctx, w, r) + } + + // Send response to client + ch := make(chan *dns.Envelope) + tr := new(dns.Transfer) + wg := new(sync.WaitGroup) + wg.Add(1) + go func() { + tr.Out(w, r, ch) + wg.Done() + }() + + var soa *dns.SOA + rrs := []dns.RR{} + l := 0 + +receive: + for records := range fromPlugin { + for _, record := range records { + if soa == nil { + var ok bool + if soa, ok = record.(*dns.SOA); !ok { // first record must be the SOA + break receive + } + serial = soa.Serial + } + rrs = append(rrs, record) + if len(rrs) > 500 { + ch <- &dns.Envelope{RR: rrs} + l += len(rrs) + rrs = []dns.RR{} + } + } + } + + if len(rrs) > 0 { + ch <- &dns.Envelope{RR: rrs} + l += len(rrs) + rrs = []dns.RR{} + } + + if soa != nil { + ch <- &dns.Envelope{RR: []dns.RR{soa}} // closing SOA. + l++ + } + + close(ch) // Even though we close the channel here, we still have + wg.Wait() // to wait before we can return and close the connection. + + if soa == nil { + return dns.RcodeServerFailure, fmt.Errorf("first record in zone %s is not SOA", state.QName()) + } + + log.Infof("Outgoing transfer of %d records of zone %s to %s with %d SOA serial", l, state.QName(), state.IP(), serial) + return dns.RcodeSuccess, nil +} + +func (x xfr) allowed(state request.Request) bool { + for _, h := range x.to { + if h == "*" { + return true + } + to, _, err := net.SplitHostPort(h) + if err != nil { + return false + } + // If remote IP matches we accept. + remote := state.IP() + if to == remote { + return true + } + } + return false +} + +// Name implements the Handler interface.
+func (Transfer) Name() string { return "transfer" } diff --git a/plugin/transfer/transfer_test.go b/plugin/transfer/transfer_test.go new file mode 100644 index 00000000000..8dce4c6e132 --- /dev/null +++ b/plugin/transfer/transfer_test.go @@ -0,0 +1,291 @@ +package transfer + +import ( + "context" + "fmt" + "testing" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/pkg/dnstest" + "github.com/coredns/coredns/plugin/test" + + "github.com/miekg/dns" +) + +// transfererPlugin implements transfer.Transferer and plugin.Handler +type transfererPlugin struct { + Zone string + Serial uint32 + Next plugin.Handler +} + +// Name implements plugin.Handler +func (transfererPlugin) Name() string { return "transfererplugin" } + +// ServeDNS implements plugin.Handler +func (p transfererPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + if r.Question[0].Name != p.Zone { + return p.Next.ServeDNS(ctx, w, r) + } + return 0, nil +} + +// Transfer implements transfer.Transferer - it returns a static AXFR response, or +// if serial is current, an abbreviated IXFR response +func (p transfererPlugin) Transfer(zone string, serial uint32) (<-chan []dns.RR, error) { + if zone != p.Zone { + return nil, ErrNotAuthoritative + } + ch := make(chan []dns.RR, 2) + defer close(ch) + ch <- []dns.RR{test.SOA(fmt.Sprintf("%s 100 IN SOA ns.dns.%s hostmaster.%s %d 7200 1800 86400 100", p.Zone, p.Zone, p.Zone, p.Serial))} + if serial >= p.Serial { + return ch, nil + } + ch <- []dns.RR{ + test.NS(fmt.Sprintf("%s 100 IN NS ns.dns.%s", p.Zone, p.Zone)), + test.A(fmt.Sprintf("ns.dns.%s 100 IN A 1.2.3.4", p.Zone)), + } + return ch, nil +} + +type terminatingPlugin struct{} + +// Name implements plugin.Handler +func (terminatingPlugin) Name() string { return "testplugin" } + +// ServeDNS implements plugin.Handler that returns NXDOMAIN for all requests +func (terminatingPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + m := new(dns.Msg) + m.SetRcode(r, dns.RcodeNameError) + w.WriteMsg(m) + return dns.RcodeNameError, nil +} + +func newTestTransfer() Transfer { + nextPlugin1 := transfererPlugin{Zone: "example.com.", Serial: 12345} + nextPlugin2 := transfererPlugin{Zone: "example.org.", Serial: 12345} + nextPlugin2.Next = terminatingPlugin{} + nextPlugin1.Next = nextPlugin2 + + transfer := Transfer{ + Transferers: []Transferer{nextPlugin1, nextPlugin2}, + xfrs: []*xfr{ + { + Zones: []string{"example.org."}, + to: []string{"*"}, + }, + { + Zones: []string{"example.com."}, + to: []string{"*"}, + }, + }, + Next: nextPlugin1, + } + return transfer +} + +func TestTransferNonZone(t *testing.T) { + + transfer := newTestTransfer() + ctx := context.TODO() + + for _, tc := range []string{"sub.example.org.", "example.test."} { + w := dnstest.NewRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetAxfr(tc) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + if w.Msg == nil { + t.Fatalf("Got nil message for AXFR %s", tc) + } + + if w.Msg.Rcode != dns.RcodeNameError { + t.Errorf("Expected NXDOMAIN for AXFR %s got %s", tc, dns.RcodeToString[w.Msg.Rcode]) + } + } +} + +func TestTransferNotAXFRorIXFR(t *testing.T) { + + transfer := newTestTransfer() + + ctx := context.TODO() + w := dnstest.NewRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetQuestion("test.domain.", dns.TypeA) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + 
t.Error(err) + } + + if w.Msg == nil { + t.Fatal("Got nil message") + } + + if w.Msg.Rcode != dns.RcodeNameError { + t.Errorf("Expected NXDOMAIN got %s", dns.RcodeToString[w.Msg.Rcode]) + } +} + +func TestTransferAXFRExampleOrg(t *testing.T) { + + transfer := newTestTransfer() + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetAxfr(transfer.xfrs[0].Zones[0]) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + validateAXFRResponse(t, w) +} + +func TestTransferAXFRExampleCom(t *testing.T) { + + transfer := newTestTransfer() + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetAxfr(transfer.xfrs[1].Zones[0]) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + validateAXFRResponse(t, w) +} + +func TestTransferIXFRFallback(t *testing.T) { + + transfer := newTestTransfer() + + testPlugin := transfer.Transferers[0].(transfererPlugin) + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetIxfr( + transfer.xfrs[0].Zones[0], + testPlugin.Serial-1, + "ns.dns."+testPlugin.Zone, + "hostmaster.dns."+testPlugin.Zone, + ) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + validateAXFRResponse(t, w) +} + +func TestTransferIXFRCurrent(t *testing.T) { + + transfer := newTestTransfer() + + testPlugin := transfer.Transferers[0].(transfererPlugin) + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetIxfr( + transfer.xfrs[0].Zones[0], + testPlugin.Serial, + "ns.dns."+testPlugin.Zone, + "hostmaster.dns."+testPlugin.Zone, + ) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + if len(w.Msgs) == 0 { + t.Logf("%+v\n", w) + t.Fatal("Did not get back a zone response") + } + + if len(w.Msgs[0].Answer) != 1 { + t.Logf("%+v\n", w) + t.Fatalf("Expected 1 answer, got %d", len(w.Msgs[0].Answer)) + } + + // Ensure the answer is the SOA + if w.Msgs[0].Answer[0].Header().Rrtype != dns.TypeSOA { + t.Error("Answer does not contain the SOA record") + } +} + +func validateAXFRResponse(t *testing.T, w *dnstest.MultiRecorder) { + if len(w.Msgs) == 0 { + t.Logf("%+v\n", w) + t.Fatal("Did not get back a zone response") + } + + if len(w.Msgs[0].Answer) == 0 { + t.Logf("%+v\n", w) + t.Fatal("Did not get back an answer") + } + + // Ensure the answer starts with SOA + if w.Msgs[0].Answer[0].Header().Rrtype != dns.TypeSOA { + t.Error("Answer does not start with SOA record") + } + + // Ensure the answer ends with SOA + if w.Msgs[len(w.Msgs)-1].Answer[len(w.Msgs[len(w.Msgs)-1].Answer)-1].Header().Rrtype != dns.TypeSOA { + t.Error("Answer does not end with SOA record") + } + + // Ensure the answer is the expected length + c := 0 + for _, m := range w.Msgs { + c += len(m.Answer) + } + if c != 4 { + t.Errorf("Answer is not the expected length (expected 4, got %d)", c) + } +} + +func TestTransferNotAllowed(t *testing.T) { + nextPlugin := transfererPlugin{Zone: "example.org.", Serial: 12345} + + transfer := Transfer{ + Transferers: []Transferer{nextPlugin}, + xfrs: []*xfr{ + { + Zones: []string{"example.org."}, + to: []string{"1.2.3.4"}, + }, + }, + Next: nextPlugin, + } + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetAxfr(transfer.xfrs[0].Zones[0]) + + rcode, err := 
transfer.ServeDNS(ctx, w, dnsmsg) + + if err != nil { + t.Error(err) + } + + if rcode != dns.RcodeRefused { + t.Errorf("Expected REFUSED response code, got %s", dns.RcodeToString[rcode]) + } + +} diff --git a/plugin/whoami/OWNERS b/plugin/whoami/OWNERS deleted file mode 100644 index 3a4ef23a1f6..00000000000 --- a/plugin/whoami/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - chrisohaver -approvers: - - miekg - - chrisohaver diff --git a/plugin/whoami/README.md b/plugin/whoami/README.md index 7417132cc1d..55d0388d737 100644 --- a/plugin/whoami/README.md +++ b/plugin/whoami/README.md @@ -34,7 +34,7 @@ whoami Start a server on the default port and load the *whoami* plugin. ~~~ corefile -. { +example.org { whoami } ~~~ diff --git a/plugin/whoami/fuzz.go b/plugin/whoami/fuzz.go index d9bbcee2bd6..70f2bd6021b 100644 --- a/plugin/whoami/fuzz.go +++ b/plugin/whoami/fuzz.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package whoami diff --git a/plugin/whoami/setup.go b/plugin/whoami/setup.go index f0ae1e6d662..6c19b72d6b6 100644 --- a/plugin/whoami/setup.go +++ b/plugin/whoami/setup.go @@ -7,12 +7,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("whoami", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("whoami", setup) } func setup(c *caddy.Controller) error { c.Next() // 'whoami' diff --git a/request/request.go b/request/request.go index 0ec98b310eb..7374b0bd69f 100644 --- a/request/request.go +++ b/request/request.go @@ -18,15 +18,16 @@ type Request struct { // Optional lowercased zone of this query. Zone string - // Cache size after first call to Size or Do. - size int - do *bool // nil: nothing, otherwise *do value + // Cache size after first call to Size or Do. If size is zero nothing has been cached yet. + // Both Size and Do set these values (and cache them). + size uint16 // UDP buffer size, or 64K in case of TCP. + do bool // DNSSEC OK value // Caches + family int8 // transport's family. name string // lowercase qname. ip string // client's ip. port string // client's port. - family int // transport's family. localPort string // server's port. localIP string // server's ip. } @@ -110,15 +111,11 @@ func (r *Request) RemoteAddr() string { return r.W.RemoteAddr().String() } func (r *Request) LocalAddr() string { return r.W.LocalAddr().String() } // Proto gets the protocol used as the transport. This will be udp or tcp. -func (r *Request) Proto() string { return Proto(r.W) } - -// Proto gets the protocol used as the transport. This will be udp or tcp. -func Proto(w dns.ResponseWriter) string { - // FIXME(miek): why not a method on Request - if _, ok := w.RemoteAddr().(*net.UDPAddr); ok { +func (r *Request) Proto() string { + if _, ok := r.W.RemoteAddr().(*net.UDPAddr); ok { return "udp" } - if _, ok := w.RemoteAddr().(*net.TCPAddr); ok { + if _, ok := r.W.RemoteAddr().(*net.TCPAddr); ok { return "tcp" } return "udp" @@ -127,7 +124,7 @@ func Proto(w dns.ResponseWriter) string { // Family returns the family of the transport, 1 for IPv4 and 2 for IPv6. func (r *Request) Family() int { if r.family != 0 { - return r.family + return int(r.family) } var a net.IP @@ -141,26 +138,20 @@ func (r *Request) Family() int { if a.To4() != nil { r.family = 1 - return r.family + return 1 } r.family = 2 - return r.family + return 2 } // Do returns if the request has the DO (DNSSEC OK) bit set. 
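With size and do now cached side by side, size == 0 serves as the single "nothing cached yet" sentinel (edns.Size never returns zero), so Do() in the next hunk can simply trigger Size() and read the cached bit. The pattern in isolation, as a sketch with hypothetical stand-ins for the OPT record lookups:

```go
package main

import "fmt"

// reqCache mirrors the new Request fields: size doubles as the
// "nothing cached yet" sentinel.
type reqCache struct {
	size uint16 // 0: not computed; otherwise the normalized buffer size
	do   bool   // valid only once size != 0
}

// Hypothetical stand-ins for o.Do() and o.UDPSize() on the OPT record.
func optDo() bool        { return true }
func optUDPSize() uint16 { return 4096 }

func (c *reqCache) Size() int {
	if c.size != 0 {
		return int(c.size)
	}
	c.do = optDo() // one pass over the OPT record fills both caches
	c.size = optUDPSize()
	return int(c.size)
}

func (c *reqCache) Do() bool {
	if c.size == 0 {
		c.Size() // first call populates size and do together
	}
	return c.do
}

func main() {
	c := new(reqCache)
	fmt.Println(c.Do(), c.Size())
}
```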
func (r *Request) Do() bool { - if r.do != nil { - return *r.do + if r.size != 0 { + return r.do } - r.do = new(bool) - - if o := r.Req.IsEdns0(); o != nil { - *r.do = o.Do() - return *r.do - } - *r.do = false - return false + r.Size() + return r.do } // Len returns the length in bytes in the request. @@ -170,21 +161,19 @@ func (r *Request) Len() int { return r.Req.Len() } // Or when the request was over TCP, we return the maximum allowed size of 64K. func (r *Request) Size() int { if r.size != 0 { - return r.size + return int(r.size) } - size := 0 + size := uint16(0) if o := r.Req.IsEdns0(); o != nil { - if r.do == nil { - r.do = new(bool) - } - *r.do = o.Do() - size = int(o.UDPSize()) + r.do = o.Do() + size = o.UDPSize() } + // normalize size size = edns.Size(r.Proto(), size) r.size = size - return size + return int(size) } // SizeAndDo adds an OPT record that the reflects the intent from request. @@ -226,26 +215,21 @@ func (r *Request) SizeAndDo(m *dns.Msg) bool { // Scrub scrubs the reply message so that it will fit the client's buffer. It will first // check if the reply fits without compression and then *with* compression. -// Scrub will then use binary search to find a save cut off point in the additional section. -// If even *without* the additional section the reply still doesn't fit we -// repeat this process for the answer section. If we scrub the answer section -// we set the TC bit on the reply; indicating the client should retry over TCP. // Note, the TC bit will be set regardless of protocol, even TCP message will // get the bit, the client should then retry with pigeons. func (r *Request) Scrub(reply *dns.Msg) *dns.Msg { - size := r.Size() + reply.Truncate(r.Size()) - reply.Compress = false - rl := reply.Len() - if size >= rl { - if r.Proto() != "udp" { - return reply - } + if reply.Compress { + return reply + } + if r.Proto() == "udp" { + rl := reply.Len() // Last ditch attempt to avoid fragmentation, if the size is bigger than the v4/v6 UDP fragmentation // limit and sent via UDP compress it (in the hope we go under that limit). Limits taken from NSD: // - // .., 1480 (EDNS/IPv4), 1220 (EDNS/IPv6), or the advertized EDNS buffer size if that is + // .., 1480 (EDNS/IPv4), 1220 (EDNS/IPv6), or the advertised EDNS buffer size if that is // smaller than the EDNS default. // See: https://open.nlnetlabs.nl/pipermail/nsd-users/2011-November/001278.html if rl > 1480 && r.Family() == 1 { @@ -254,91 +238,8 @@ func (r *Request) Scrub(reply *dns.Msg) *dns.Msg { if rl > 1220 && r.Family() == 2 { reply.Compress = true } - - return reply } - reply.Compress = true - rl = reply.Len() - if size >= rl { - return reply - } - - // Account for the OPT record that gets added in SizeAndDo(), subtract that length. - re := len(reply.Extra) - if r.Req.IsEdns0() != nil { - size -= optLen - // re can never be 0 because we have an OPT RR. - re-- - } - - l, m := 0, 0 - origExtra := reply.Extra - for l <= re { - m = (l + re) / 2 - reply.Extra = origExtra[:m] - rl = reply.Len() - if rl < size { - l = m + 1 - continue - } - if rl > size { - re = m - 1 - continue - } - if rl == size { - break - } - } - - // The binary search only breaks on an exact match, which will be - // pretty rare. Normally, the loop will exit when l > re, meaning that - // in the previous iteration either: - // rl < size: no need to do anything. - // rl > size: the final size is too large, and if m > 0, the preceding - // iteration the size was too small. Select that preceding size. 
- if rl > size && m > 0 { - reply.Extra = origExtra[:m-1] - rl = reply.Len() - } - - if rl <= size { - return reply - } - - ra := len(reply.Answer) - l, m = 0, 0 - origAnswer := reply.Answer - for l <= ra { - m = (l + ra) / 2 - reply.Answer = origAnswer[:m] - rl = reply.Len() - if rl < size { - l = m + 1 - continue - } - if rl > size { - ra = m - 1 - continue - } - if rl == size { - break - } - } - - // The binary search only breaks on an exact match, which will be - // pretty rare. Normally, the loop will exit when l > ra, meaning that - // in the previous iteration either: - // rl < size: no need to do anything. - // rl > size: the final size is too large, and if m > 0, the preceding - // iteration the size was too small. Select that preceding size. - if rl > size && m > 0 { - reply.Answer = origAnswer[:m-1] - // No need to recalc length, as we don't use it. We set truncated anyway. Doing - // this extra m-1 step does make it fit in the client's buffer however. - } - - reply.Truncated = true return reply } @@ -446,7 +347,7 @@ func (r *Request) Match(reply *dns.Msg) bool { return false } - if reply.Response == false { + if !reply.Response { return false } @@ -460,5 +361,3 @@ func (r *Request) Match(reply *dns.Msg) bool { return true } - -const optLen = 12 // OPT record length. diff --git a/request/request_test.go b/request/request_test.go index 4411c6a8246..0a3b1f2d8dc 100644 --- a/request/request_test.go +++ b/request/request_test.go @@ -13,7 +13,7 @@ func TestRequestDo(t *testing.T) { st := testRequest() st.Do() - if st.do == nil { + if !st.do { t.Errorf("Expected st.do to be set") } } @@ -98,8 +98,8 @@ func TestRequestScrubExtra(t *testing.T) { if want, got := req.Size(), reply.Len(); want < got { t.Errorf("Want scrub to reduce message length below %d bytes, got %d bytes", want, got) } - if reply.Truncated { - t.Errorf("Want scrub to not set truncated bit") + if !reply.Truncated { + t.Errorf("Want scrub to set truncated bit") } } @@ -120,8 +120,8 @@ func TestRequestScrubExtraEdns0(t *testing.T) { if want, got := req.Size(), reply.Len(); want < got { t.Errorf("Want scrub to reduce message length below %d bytes, got %d bytes", want, got) } - if reply.Truncated { - t.Errorf("Want scrub to not set truncated bit") + if !reply.Truncated { + t.Errorf("Want scrub to set truncated bit") } } @@ -146,8 +146,8 @@ func TestRequestScrubExtraRegression(t *testing.T) { if want, got := req.Size(), reply.Len(); want < got { t.Errorf("Want scrub to reduce message length below %d bytes, got %d bytes", want, got) } - if reply.Truncated { - t.Errorf("Want scrub to not set truncated bit") + if !reply.Truncated { + t.Errorf("Want scrub to set truncated bit") } } diff --git a/request/writer.go b/request/writer.go index 6caba0c2e62..587b3b5d811 100644 --- a/request/writer.go +++ b/request/writer.go @@ -15,8 +15,7 @@ func NewScrubWriter(req *dns.Msg, w dns.ResponseWriter) *ScrubWriter { return &S // scrub on the message m and will then write it to the client. 
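The deleted binary search above is essentially what miekg/dns's Msg.Truncate now does internally: it trims the answer and additional sections to the target size and sets the TC bit when records are dropped, so Scrub shrinks to the last-ditch UDP compression heuristic and ScrubWriter (next hunk) only has to call SizeAndDo and Scrub before writing. Direct use of Truncate, as a standalone sketch with illustrative sizes:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	req := new(dns.Msg)
	req.SetQuestion("example.org.", dns.TypeA)

	reply := new(dns.Msg)
	reply.SetReply(req)
	for i := 0; i < 200; i++ { // bulk the answer section past a small UDP buffer
		rr, _ := dns.NewRR(fmt.Sprintf("host%d.example.org. 3600 IN A 192.0.2.1", i))
		reply.Answer = append(reply.Answer, rr)
	}

	reply.Truncate(512) // drops records to fit 512 bytes and sets reply.Truncated
	fmt.Println(len(reply.Answer), reply.Truncated)
}
```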
func (s *ScrubWriter) WriteMsg(m *dns.Msg) error { state := Request{Req: s.req, W: s.ResponseWriter} - - n := state.Scrub(m) - state.SizeAndDo(n) - return s.ResponseWriter.WriteMsg(n) + state.SizeAndDo(m) + state.Scrub(m) + return s.ResponseWriter.WriteMsg(m) } diff --git a/test/auto_test.go b/test/auto_test.go index 4d9b70a1c54..07e2af12ddc 100644 --- a/test/auto_test.go +++ b/test/auto_test.go @@ -129,9 +129,9 @@ func TestAutoAXFR(t *testing.T) { t.Fatalf("Could not get CoreDNS serving instance: %s", err) } - udp, _ := CoreDNSServerPorts(i, 0) - if udp == "" { - t.Fatal("Could not get UDP listening port") + _, tcp := CoreDNSServerPorts(i, 0) + if tcp == "" { + t.Fatal("Could not get TCP listening port") } defer i.Stop() @@ -142,14 +142,20 @@ func TestAutoAXFR(t *testing.T) { time.Sleep(1100 * time.Millisecond) // wait for it to be picked up + tr := new(dns.Transfer) m := new(dns.Msg) m.SetAxfr("example.org.") - resp, err := dns.Exchange(m, udp) + c, err := tr.In(m, tcp) if err != nil { t.Fatal("Expected to receive reply, but didn't") } - if len(resp.Answer) != 5 { - t.Fatalf("Expected response with %d RRs, got %d", 5, len(resp.Answer)) + l := 0 + for e := range c { + l += len(e.RR) + } + + if l != 5 { + t.Fatalf("Expected response with %d RRs, got %d", 5, l) } } diff --git a/test/doc.go b/test/doc.go index ba09e877248..528092a857b 100644 --- a/test/doc.go +++ b/test/doc.go @@ -1,2 +1,2 @@ -// Package test contains function and types useful for writing tests +// Package test contains function and types useful for writing tests. package test diff --git a/test/ds_file_test.go b/test/ds_file_test.go index 7d4ae0cdc6c..84b383368bc 100644 --- a/test/ds_file_test.go +++ b/test/ds_file_test.go @@ -8,7 +8,7 @@ import ( "github.com/miekg/dns" ) -// Using miek.nl here because this is the easiest zone to get access to and it's masters +// Using miek.nl here because this is the easiest zone to get access to and its masters // run both NSD and BIND9, making checks like "what should we actually return" super easy. var dsTestCases = []mtest.Case{ { diff --git a/test/etcd_test.go b/test/etcd_test.go index 2ac165beb37..f47c7206556 100644 --- a/test/etcd_test.go +++ b/test/etcd_test.go @@ -11,8 +11,8 @@ import ( "github.com/coredns/coredns/plugin/etcd" "github.com/coredns/coredns/plugin/etcd/msg" - etcdcv3 "github.com/coreos/etcd/clientv3" "github.com/miekg/dns" + etcdcv3 "go.etcd.io/etcd/clientv3" ) func etcdPlugin() *etcd.Etcd { diff --git a/test/example_test.go b/test/example_test.go index 852143fe8a2..9d8dec8329e 100644 --- a/test/example_test.go +++ b/test/example_test.go @@ -2,14 +2,15 @@ package test const exampleOrg = `; example.org test file $TTL 3600 -example.org. IN SOA sns.dns.icann.org. noc.dns.icann.org. 2015082541 7200 3600 1209600 3600 -example.org. IN NS b.iana-servers.net. -example.org. IN NS a.iana-servers.net. -example.org. IN A 127.0.0.1 -example.org. IN A 127.0.0.2 -short.example.org. 1 IN A 127.0.0.3 -*.w.example.org. IN TXT "Wildcard" -a.b.c.w.example.org. IN TXT "Not a wildcard" -cname.example.org. IN CNAME www.example.net. -service.example.org. IN SRV 8080 10 10 example.org. +@ IN SOA sns.dns.icann.org. noc.dns.icann.org. 2015082541 7200 3600 1209600 3600 +@ IN NS b.iana-servers.net. +@ IN NS a.iana-servers.net. +@ IN A 127.0.0.1 +@ IN A 127.0.0.2 +short 1 IN A 127.0.0.3 + +*.w 3600 IN TXT "Wildcard" +a.b.c.w IN TXT "Not a wildcard" +cname IN CNAME www.example.net. 
+service IN SRV 8080 10 10 @ ` diff --git a/test/external_test.go b/test/external_test.go deleted file mode 100644 index 8e6adab9123..00000000000 --- a/test/external_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package test - -import ( - "fmt" - "os" - "os/exec" - "strings" - "testing" -) - -// Go get external example plugin, compile it into CoreDNS -// and check if it is really there, but running coredns -plugins. - -// Dangerous test as it messes with your git tree, maybe use tag? -func testExternalPluginCompile(t *testing.T) { - if err := addExamplePlugin(); err != nil { - t.Fatal(err) - } - defer run(t, gitReset) - - if _, err := run(t, goGet); err != nil { - t.Fatal(err) - } - - if _, err := run(t, goGen); err != nil { - t.Fatal(err) - } - - if _, err := run(t, goBuild); err != nil { - t.Fatal(err) - } - - out, err := run(t, coredns) - if err != nil { - t.Fatal(err) - } - - if !strings.Contains(string(out), "dns.example") { - t.Fatal("Plugin dns.example should be there") - } -} - -func run(t *testing.T, c *exec.Cmd) ([]byte, error) { - c.Dir = ".." - out, err := c.Output() - if err != nil { - return nil, fmt.Errorf("run: failed to run %s %s: %q", c.Args[0], c.Args[1], err) - } - return out, nil - -} - -func addExamplePlugin() error { - f, err := os.OpenFile("../plugin.cfg", os.O_APPEND|os.O_WRONLY, os.ModeAppend) - if err != nil { - return err - } - defer f.Close() - - _, err = f.WriteString(example) - return err -} - -var ( - goBuild = exec.Command("go", "build") - goGen = exec.Command("go", "generate") - goGet = exec.Command("go", "get", "github.com/coredns/example") - gitReset = exec.Command("git", "checkout", "core/*") - coredns = exec.Command("./coredns", "-plugins") -) - -const example = "1001:example:github.com/coredns/example" diff --git a/test/file_reload_test.go b/test/file_reload_test.go index 91372ecc302..e61003b8bee 100644 --- a/test/file_reload_test.go +++ b/test/file_reload_test.go @@ -5,15 +5,12 @@ import ( "testing" "time" - "github.com/coredns/coredns/plugin/file" "github.com/coredns/coredns/plugin/test" "github.com/miekg/dns" ) func TestZoneReload(t *testing.T) { - file.TickTime = 1 * time.Second - name, rm, err := test.TempFile(".", exampleOrg) if err != nil { t.Fatalf("Failed to create zone: %s", err) diff --git a/test/fuzz_corefile.go b/test/fuzz_corefile.go index 0abb9d6b3b4..a1a7e6a368d 100644 --- a/test/fuzz_corefile.go +++ b/test/fuzz_corefile.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package test diff --git a/test/grpc_test.go b/test/grpc_test.go index fe273e78900..bb49e6b9bac 100644 --- a/test/grpc_test.go +++ b/test/grpc_test.go @@ -22,7 +22,9 @@ func TestGrpc(t *testing.T) { } defer g.Stop() - conn, err := grpc.Dial(tcp, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(5*time.Second)) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + conn, err := grpc.DialContext(ctx, tcp, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { t.Fatalf("Expected no error but got: %s", err) } diff --git a/test/plugin_test.go b/test/plugin_test.go deleted file mode 100644 index 4003a958fe3..00000000000 --- a/test/plugin_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package test - -import ( - "testing" - - "github.com/coredns/coredns/plugin/test" - - "github.com/miekg/dns" - - // Load all managed plugins in github.com/coredns/coredns - _ "github.com/coredns/coredns/core/plugin" -) - -func benchmarkLookupBalanceRewriteCache(b *testing.B) { - t := 
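TestAutoAXFR (earlier in this diff) now pulls the zone with dns.Transfer over TCP instead of a single UDP exchange, since an AXFR can span multiple envelopes. The client-side pattern, condensed into a sketch with an illustrative address:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.")

	tr := new(dns.Transfer)
	env, err := tr.In(m, "127.0.0.1:5300") // zone transfers run over TCP
	if err != nil {
		log.Fatal(err)
	}

	total := 0
	for e := range env { // each envelope carries a batch of RRs
		if e.Error != nil {
			log.Fatal(e.Error)
		}
		total += len(e.RR)
	}
	fmt.Println("records transferred:", total)
}
```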
new(testing.T) - name, rm, err := test.TempFile(".", exampleOrg) - if err != nil { - t.Fatalf("Failed to create zone: %s", err) - } - defer rm() - - corefile := `example.org:0 { - file ` + name + ` - rewrite type ANY HINFO - loadbalance -} -` - - ex, udp, _, err := CoreDNSServerAndPorts(corefile) - if err != nil { - t.Fatalf("Could not get CoreDNS serving instance: %s", err) - } - defer ex.Stop() - - c := new(dns.Client) - m := new(dns.Msg) - m.SetQuestion("example.org.", dns.TypeA) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, udp) - } -} diff --git a/test/reload_test.go b/test/reload_test.go index e026a97cead..f17a85b050c 100644 --- a/test/reload_test.go +++ b/test/reload_test.go @@ -110,7 +110,7 @@ func TestReloadMetricsHealth(t *testing.T) { } ok, _ := ioutil.ReadAll(resp.Body) resp.Body.Close() - if string(ok) != "OK" { + if string(ok) != http.StatusText(http.StatusOK) { t.Errorf("Failed to receive OK, got %s", ok) } diff --git a/test/secondary_test.go b/test/secondary_test.go index 685e23e613e..1d65dfb2a3d 100644 --- a/test/secondary_test.go +++ b/test/secondary_test.go @@ -71,15 +71,61 @@ func TestSecondaryZoneTransfer(t *testing.T) { m.SetQuestion("example.org.", dns.TypeSOA) var r *dns.Msg - // This is now async; we we need to wait for it to be transfered. + // This is now async; we need to wait for it to be transferred. for i := 0; i < 10; i++ { r, _ = dns.Exchange(m, udp) - if len(r.Answer) == 0 { + if len(r.Answer) != 0 { break } time.Sleep(100 * time.Microsecond) } - if len(r.Answer) != 0 { + if len(r.Answer) == 0 { t.Fatalf("Expected answer section") } } + +func TestIxfrResponse(t *testing.T) { + // ixfr query with current soa should return single packet with that soa (no transfer needed). + name, rm, err := test.TempFile(".", exampleOrg) + if err != nil { + t.Fatalf("Failed to create zone: %s", err) + } + defer rm() + + corefile := `example.org:0 { + file ` + name + ` { + transfer to * + } +} +` + + i, udp, _, err := CoreDNSServerAndPorts(corefile) + if err != nil { + t.Fatalf("Could not get CoreDNS serving instance: %s", err) + } + defer i.Stop() + + m := new(dns.Msg) + m.SetQuestion("example.org.", dns.TypeIXFR) + m.Ns = []dns.RR{test.SOA("example.org. IN SOA sns.dns.icann.org. noc.dns.icann.org. 2015082541 7200 3600 1209600 3600")} // copied from exampleOrg + + var r *dns.Msg + // This is now async; we need to wait for it to be transferred. + for i := 0; i < 10; i++ { + r, _ = dns.Exchange(m, udp) + if len(r.Answer) != 0 { + break + } + time.Sleep(100 * time.Microsecond) + } + if len(r.Answer) != 1 { + t.Fatalf("Expected answer section with single RR") + } + soa, ok := r.Answer[0].(*dns.SOA) + if !ok { + t.Fatalf("Expected answer section with SOA RR") + } + if soa.Serial != 2015082541 { + t.Fatalf("Serial should be %d, got %d", 2015082541, soa.Serial) + } +} diff --git a/test/server.go b/test/server.go index 93016501dc0..9908285d135 100644 --- a/test/server.go +++ b/test/server.go @@ -4,9 +4,10 @@ import ( "sync" "github.com/coredns/coredns/core/dnsserver" - // Hook in CoreDNS. 
_ "github.com/coredns/coredns/core" + // Load all managed plugins in github.com/coredns/coredns + _ "github.com/coredns/coredns/core/plugin" "github.com/caddyserver/caddy" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE new file mode 100644 index 00000000000..af39a91e703 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 00000000000..2d1d72608c2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go new file mode 100644 index 00000000000..bb47c7dee04 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go @@ -0,0 +1,138 @@ +// +build go1.9 + +// Copyright 2019 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
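Everything in this profiles file is re-export plumbing: type aliases plus thin constructor wrappers around one pinned API version, so callers can import a stable path while the dated package underneath moves. The aliasing mechanism in miniature (a standalone sketch of the language feature, not SDK code):

```go
package main

import "fmt"

// versioned stands in for a type from the dated SDK package
// (e.g. services/dns/mgmt/2018-05-01/dns).
type versioned struct{ Name string }

// Zone is a Go 1.9+ type alias: the same type under a stable name,
// which is all the profiles package adds on top of the versioned API.
type Zone = versioned

func main() {
	z := Zone{Name: "example.org"}
	var v versioned = z // identical types: no conversion required
	fmt.Println(v.Name)
}
```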
+ +// This code was auto-generated by: +// github.com/Azure/azure-sdk-for-go/tools/profileBuilder + +package dns + +import ( + "context" + + original "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns" +) + +const ( + DefaultBaseURI = original.DefaultBaseURI +) + +type RecordType = original.RecordType + +const ( + A RecordType = original.A + AAAA RecordType = original.AAAA + CAA RecordType = original.CAA + CNAME RecordType = original.CNAME + MX RecordType = original.MX + NS RecordType = original.NS + PTR RecordType = original.PTR + SOA RecordType = original.SOA + SRV RecordType = original.SRV + TXT RecordType = original.TXT +) + +type ZoneType = original.ZoneType + +const ( + Private ZoneType = original.Private + Public ZoneType = original.Public +) + +type ARecord = original.ARecord +type AaaaRecord = original.AaaaRecord +type BaseClient = original.BaseClient +type CaaRecord = original.CaaRecord +type CloudError = original.CloudError +type CloudErrorBody = original.CloudErrorBody +type CnameRecord = original.CnameRecord +type MxRecord = original.MxRecord +type NsRecord = original.NsRecord +type PtrRecord = original.PtrRecord +type RecordSet = original.RecordSet +type RecordSetListResult = original.RecordSetListResult +type RecordSetListResultIterator = original.RecordSetListResultIterator +type RecordSetListResultPage = original.RecordSetListResultPage +type RecordSetProperties = original.RecordSetProperties +type RecordSetUpdateParameters = original.RecordSetUpdateParameters +type RecordSetsClient = original.RecordSetsClient +type Resource = original.Resource +type ResourceReference = original.ResourceReference +type ResourceReferenceClient = original.ResourceReferenceClient +type ResourceReferenceRequest = original.ResourceReferenceRequest +type ResourceReferenceRequestProperties = original.ResourceReferenceRequestProperties +type ResourceReferenceResult = original.ResourceReferenceResult +type ResourceReferenceResultProperties = original.ResourceReferenceResultProperties +type SoaRecord = original.SoaRecord +type SrvRecord = original.SrvRecord +type SubResource = original.SubResource +type TxtRecord = original.TxtRecord +type Zone = original.Zone +type ZoneListResult = original.ZoneListResult +type ZoneListResultIterator = original.ZoneListResultIterator +type ZoneListResultPage = original.ZoneListResultPage +type ZoneProperties = original.ZoneProperties +type ZoneUpdate = original.ZoneUpdate +type ZonesClient = original.ZonesClient +type ZonesDeleteFuture = original.ZonesDeleteFuture + +func New(subscriptionID string) BaseClient { + return original.New(subscriptionID) +} +func NewRecordSetListResultIterator(page RecordSetListResultPage) RecordSetListResultIterator { + return original.NewRecordSetListResultIterator(page) +} +func NewRecordSetListResultPage(getNextPage func(context.Context, RecordSetListResult) (RecordSetListResult, error)) RecordSetListResultPage { + return original.NewRecordSetListResultPage(getNextPage) +} +func NewRecordSetsClient(subscriptionID string) RecordSetsClient { + return original.NewRecordSetsClient(subscriptionID) +} +func NewRecordSetsClientWithBaseURI(baseURI string, subscriptionID string) RecordSetsClient { + return original.NewRecordSetsClientWithBaseURI(baseURI, subscriptionID) +} +func NewResourceReferenceClient(subscriptionID string) ResourceReferenceClient { + return original.NewResourceReferenceClient(subscriptionID) +} +func NewResourceReferenceClientWithBaseURI(baseURI string, subscriptionID string) 
ResourceReferenceClient { + return original.NewResourceReferenceClientWithBaseURI(baseURI, subscriptionID) +} +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return original.NewWithBaseURI(baseURI, subscriptionID) +} +func NewZoneListResultIterator(page ZoneListResultPage) ZoneListResultIterator { + return original.NewZoneListResultIterator(page) +} +func NewZoneListResultPage(getNextPage func(context.Context, ZoneListResult) (ZoneListResult, error)) ZoneListResultPage { + return original.NewZoneListResultPage(getNextPage) +} +func NewZonesClient(subscriptionID string) ZonesClient { + return original.NewZonesClient(subscriptionID) +} +func NewZonesClientWithBaseURI(baseURI string, subscriptionID string) ZonesClient { + return original.NewZonesClientWithBaseURI(baseURI, subscriptionID) +} +func PossibleRecordTypeValues() []RecordType { + return original.PossibleRecordTypeValues() +} +func PossibleZoneTypeValues() []ZoneType { + return original.PossibleZoneTypeValues() +} +func UserAgent() string { + return original.UserAgent() + " profiles/latest" +} +func Version() string { + return original.Version() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go new file mode 100644 index 00000000000..cf17ca134ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go @@ -0,0 +1,51 @@ +// Package dns implements the Azure ARM Dns service API version 2018-05-01. +// +// The DNS Management Client. +package dns + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Dns + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Dns. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client. 
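These constructors only set the endpoint and subscription; the embedded autorest.Client still needs an Authorizer before any request goes out. A typical wiring, assuming go-autorest's auth helper package (an assumption: this diff does not vendor it, and the subscription ID is a placeholder):

```go
package main

import (
	"log"

	dns "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	client := dns.NewZonesClient("00000000-0000-0000-0000-000000000000")

	// NewAuthorizerFromEnvironment reads the AZURE_* variables; assumed
	// helper from go-autorest, not part of this diff.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer
	_ = client // ready for Get/CreateOrUpdate/List calls
}
```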
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go new file mode 100644 index 00000000000..b9b4c627c0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go @@ -0,0 +1,943 @@ +package dns + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns" + +// RecordType enumerates the values for record type. +type RecordType string + +const ( + // A ... + A RecordType = "A" + // AAAA ... + AAAA RecordType = "AAAA" + // CAA ... + CAA RecordType = "CAA" + // CNAME ... + CNAME RecordType = "CNAME" + // MX ... + MX RecordType = "MX" + // NS ... + NS RecordType = "NS" + // PTR ... + PTR RecordType = "PTR" + // SOA ... + SOA RecordType = "SOA" + // SRV ... + SRV RecordType = "SRV" + // TXT ... + TXT RecordType = "TXT" +) + +// PossibleRecordTypeValues returns an array of possible values for the RecordType const type. +func PossibleRecordTypeValues() []RecordType { + return []RecordType{A, AAAA, CAA, CNAME, MX, NS, PTR, SOA, SRV, TXT} +} + +// ZoneType enumerates the values for zone type. +type ZoneType string + +const ( + // Private ... + Private ZoneType = "Private" + // Public ... + Public ZoneType = "Public" +) + +// PossibleZoneTypeValues returns an array of possible values for the ZoneType const type. +func PossibleZoneTypeValues() []ZoneType { + return []ZoneType{Private, Public} +} + +// AaaaRecord an AAAA record. +type AaaaRecord struct { + // Ipv6Address - The IPv6 address of this AAAA record. + Ipv6Address *string `json:"ipv6Address,omitempty"` +} + +// ARecord an A record. +type ARecord struct { + // Ipv4Address - The IPv4 address of this A record. + Ipv4Address *string `json:"ipv4Address,omitempty"` +} + +// CaaRecord a CAA record. +type CaaRecord struct { + // Flags - The flags for this CAA record as an integer between 0 and 255. + Flags *int32 `json:"flags,omitempty"` + // Tag - The tag for this CAA record. + Tag *string `json:"tag,omitempty"` + // Value - The value for this CAA record. 
+ Value *string `json:"value,omitempty"` +} + +// CloudError an error message +type CloudError struct { + // Error - The error message body + Error *CloudErrorBody `json:"error,omitempty"` +} + +// CloudErrorBody the body of an error message +type CloudErrorBody struct { + // Code - The error code + Code *string `json:"code,omitempty"` + // Message - A description of what caused the error + Message *string `json:"message,omitempty"` + // Target - The target resource of the error message + Target *string `json:"target,omitempty"` + // Details - Extra error information + Details *[]CloudErrorBody `json:"details,omitempty"` +} + +// CnameRecord a CNAME record. +type CnameRecord struct { + // Cname - The canonical name for this CNAME record. + Cname *string `json:"cname,omitempty"` +} + +// MxRecord an MX record. +type MxRecord struct { + // Preference - The preference value for this MX record. + Preference *int32 `json:"preference,omitempty"` + // Exchange - The domain name of the mail host for this MX record. + Exchange *string `json:"exchange,omitempty"` +} + +// NsRecord an NS record. +type NsRecord struct { + // Nsdname - The name server name for this NS record. + Nsdname *string `json:"nsdname,omitempty"` +} + +// PtrRecord a PTR record. +type PtrRecord struct { + // Ptrdname - The PTR target domain name for this PTR record. + Ptrdname *string `json:"ptrdname,omitempty"` +} + +// RecordSet describes a DNS record set (a collection of DNS records with the same name and type). +type RecordSet struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The ID of the record set. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the record set. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the record set. + Type *string `json:"type,omitempty"` + // Etag - The etag of the record set. + Etag *string `json:"etag,omitempty"` + // RecordSetProperties - The properties of the record set. + *RecordSetProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for RecordSet. +func (rs RecordSet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rs.Etag != nil { + objectMap["etag"] = rs.Etag + } + if rs.RecordSetProperties != nil { + objectMap["properties"] = rs.RecordSetProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for RecordSet struct. +func (rs *RecordSet) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + rs.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + rs.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + rs.Type = &typeVar + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + rs.Etag = &etag + } + case "properties": + if v != nil { + var recordSetProperties RecordSetProperties + err = json.Unmarshal(*v, &recordSetProperties) + if err != nil { + return err + } + rs.RecordSetProperties = &recordSetProperties + } + } + } + + return nil +} + +// RecordSetListResult the response to a record set List operation. 
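All of these model fields are pointers so that "absent" and "zero" stay distinct on the wire, which is why the companion "to" package from go-autorest is the idiomatic way to build them. For instance, a minimal A record set (values illustrative):

```go
package main

import (
	"fmt"

	dns "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	rs := dns.RecordSet{
		RecordSetProperties: &dns.RecordSetProperties{
			TTL: to.Int64Ptr(300),
			ARecords: &[]dns.ARecord{
				{Ipv4Address: to.StringPtr("203.0.113.10")},
			},
		},
	}
	fmt.Println(*rs.RecordSetProperties.TTL)
}
```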
+type RecordSetListResult struct { + autorest.Response `json:"-"` + // Value - Information about the record sets in the response. + Value *[]RecordSet `json:"value,omitempty"` + // NextLink - READ-ONLY; The continuation token for the next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// RecordSetListResultIterator provides access to a complete listing of RecordSet values. +type RecordSetListResultIterator struct { + i int + page RecordSetListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *RecordSetListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *RecordSetListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter RecordSetListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RecordSetListResultIterator) Response() RecordSetListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter RecordSetListResultIterator) Value() RecordSet { + if !iter.page.NotDone() { + return RecordSet{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the RecordSetListResultIterator type. +func NewRecordSetListResultIterator(page RecordSetListResultPage) RecordSetListResultIterator { + return RecordSetListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rslr RecordSetListResult) IsEmpty() bool { + return rslr.Value == nil || len(*rslr.Value) == 0 +} + +// recordSetListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rslr RecordSetListResult) recordSetListResultPreparer(ctx context.Context) (*http.Request, error) { + if rslr.NextLink == nil || len(to.String(rslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rslr.NextLink))) +} + +// RecordSetListResultPage contains a page of RecordSet values. +type RecordSetListResultPage struct { + fn func(context.Context, RecordSetListResult) (RecordSetListResult, error) + rslr RecordSetListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
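The iterator above and the page type below are the SDK's standard pagination pair; callers normally use the *Complete list variants, which return an iterator, and loop with NotDone, Value, and NextWithContext. A sketch of that consumption pattern (ListByDNSZoneComplete and its exact signature are taken on trust from this API version; treat them as assumptions):

```go
package azdns

import (
	"context"
	"fmt"

	dns "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns"
	"github.com/Azure/go-autorest/autorest/to"
)

// listRecordSets walks every page of record sets in a zone via the iterator.
func listRecordSets(ctx context.Context, client dns.RecordSetsClient, group, zone string) error {
	it, err := client.ListByDNSZoneComplete(ctx, group, zone, nil, "")
	if err != nil {
		return err
	}
	for it.NotDone() {
		fmt.Println(to.String(it.Value().Name))
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```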
+func (page *RecordSetListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rslr) + if err != nil { + return err + } + page.rslr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *RecordSetListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RecordSetListResultPage) NotDone() bool { + return !page.rslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page RecordSetListResultPage) Response() RecordSetListResult { + return page.rslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RecordSetListResultPage) Values() []RecordSet { + if page.rslr.IsEmpty() { + return nil + } + return *page.rslr.Value +} + +// Creates a new instance of the RecordSetListResultPage type. +func NewRecordSetListResultPage(getNextPage func(context.Context, RecordSetListResult) (RecordSetListResult, error)) RecordSetListResultPage { + return RecordSetListResultPage{fn: getNextPage} +} + +// RecordSetProperties represents the properties of the records in the record set. +type RecordSetProperties struct { + // Metadata - The metadata attached to the record set. + Metadata map[string]*string `json:"metadata"` + // TTL - The TTL (time-to-live) of the records in the record set. + TTL *int64 `json:"TTL,omitempty"` + // Fqdn - READ-ONLY; Fully qualified domain name of the record set. + Fqdn *string `json:"fqdn,omitempty"` + // ProvisioningState - READ-ONLY; provisioning State of the record set. + ProvisioningState *string `json:"provisioningState,omitempty"` + // TargetResource - A reference to an azure resource from where the dns resource value is taken. + TargetResource *SubResource `json:"targetResource,omitempty"` + // ARecords - The list of A records in the record set. + ARecords *[]ARecord `json:"ARecords,omitempty"` + // AaaaRecords - The list of AAAA records in the record set. + AaaaRecords *[]AaaaRecord `json:"AAAARecords,omitempty"` + // MxRecords - The list of MX records in the record set. + MxRecords *[]MxRecord `json:"MXRecords,omitempty"` + // NsRecords - The list of NS records in the record set. + NsRecords *[]NsRecord `json:"NSRecords,omitempty"` + // PtrRecords - The list of PTR records in the record set. + PtrRecords *[]PtrRecord `json:"PTRRecords,omitempty"` + // SrvRecords - The list of SRV records in the record set. + SrvRecords *[]SrvRecord `json:"SRVRecords,omitempty"` + // TxtRecords - The list of TXT records in the record set. + TxtRecords *[]TxtRecord `json:"TXTRecords,omitempty"` + // CnameRecord - The CNAME record in the record set. + CnameRecord *CnameRecord `json:"CNAMERecord,omitempty"` + // SoaRecord - The SOA record in the record set. + SoaRecord *SoaRecord `json:"SOARecord,omitempty"` + // CaaRecords - The list of CAA records in the record set. 
+ CaaRecords *[]CaaRecord `json:"caaRecords,omitempty"` +} + +// MarshalJSON is the custom marshaler for RecordSetProperties. +func (rsp RecordSetProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rsp.Metadata != nil { + objectMap["metadata"] = rsp.Metadata + } + if rsp.TTL != nil { + objectMap["TTL"] = rsp.TTL + } + if rsp.TargetResource != nil { + objectMap["targetResource"] = rsp.TargetResource + } + if rsp.ARecords != nil { + objectMap["ARecords"] = rsp.ARecords + } + if rsp.AaaaRecords != nil { + objectMap["AAAARecords"] = rsp.AaaaRecords + } + if rsp.MxRecords != nil { + objectMap["MXRecords"] = rsp.MxRecords + } + if rsp.NsRecords != nil { + objectMap["NSRecords"] = rsp.NsRecords + } + if rsp.PtrRecords != nil { + objectMap["PTRRecords"] = rsp.PtrRecords + } + if rsp.SrvRecords != nil { + objectMap["SRVRecords"] = rsp.SrvRecords + } + if rsp.TxtRecords != nil { + objectMap["TXTRecords"] = rsp.TxtRecords + } + if rsp.CnameRecord != nil { + objectMap["CNAMERecord"] = rsp.CnameRecord + } + if rsp.SoaRecord != nil { + objectMap["SOARecord"] = rsp.SoaRecord + } + if rsp.CaaRecords != nil { + objectMap["caaRecords"] = rsp.CaaRecords + } + return json.Marshal(objectMap) +} + +// RecordSetUpdateParameters parameters supplied to update a record set. +type RecordSetUpdateParameters struct { + // RecordSet - Specifies information about the record set being updated. + RecordSet *RecordSet `json:"RecordSet,omitempty"` +} + +// Resource common properties of an Azure Resource Manager resource +type Resource struct { + // ID - READ-ONLY; Resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// ResourceReference represents a single Azure resource and its referencing DNS records. +type ResourceReference struct { + // DNSResources - A list of dns Records + DNSResources *[]SubResource `json:"dnsResources,omitempty"` + // TargetResource - A reference to an azure resource from where the dns resource value is taken. + TargetResource *SubResource `json:"targetResource,omitempty"` +} + +// ResourceReferenceRequest represents the properties of the Dns Resource Reference Request. +type ResourceReferenceRequest struct { + // ResourceReferenceRequestProperties - The properties of the Resource Reference Request. + *ResourceReferenceRequestProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceReferenceRequest. +func (rrr ResourceReferenceRequest) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rrr.ResourceReferenceRequestProperties != nil { + objectMap["properties"] = rrr.ResourceReferenceRequestProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ResourceReferenceRequest struct. 
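These custom (un)marshalers all exist for one reason: each model embeds a *Properties pointer that must round-trip under a nested "properties" key, while READ-ONLY fields must never be serialized back to the service. The shape of that trick, reduced to a toy type (a sketch, not SDK code):

```go
package azdns

import "encoding/json"

type props struct {
	TTL *int64 `json:"TTL,omitempty"`
}

type record struct {
	ID *string `json:"id,omitempty"` // READ-ONLY in the real models
	*props                           // flattened under "properties" by hand
}

// MarshalJSON nests the embedded properties and omits read-only fields,
// mirroring what the generated marshalers above do.
func (r record) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if r.props != nil {
		objectMap["properties"] = r.props
	}
	// READ-ONLY fields such as ID are deliberately not emitted.
	return json.Marshal(objectMap)
}
```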
+func (rrr *ResourceReferenceRequest) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var resourceReferenceRequestProperties ResourceReferenceRequestProperties + err = json.Unmarshal(*v, &resourceReferenceRequestProperties) + if err != nil { + return err + } + rrr.ResourceReferenceRequestProperties = &resourceReferenceRequestProperties + } + } + } + + return nil +} + +// ResourceReferenceRequestProperties represents the properties of the Dns Resource Reference Request. +type ResourceReferenceRequestProperties struct { + // TargetResources - A list of references to azure resources for which referencing dns records need to be queried. + TargetResources *[]SubResource `json:"targetResources,omitempty"` +} + +// ResourceReferenceResult represents the properties of the Dns Resource Reference Result. +type ResourceReferenceResult struct { + autorest.Response `json:"-"` + // ResourceReferenceResultProperties - The result of dns resource reference request. Returns a list of dns resource references for each of the azure resource in the request. + *ResourceReferenceResultProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceReferenceResult. +func (rrr ResourceReferenceResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rrr.ResourceReferenceResultProperties != nil { + objectMap["properties"] = rrr.ResourceReferenceResultProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ResourceReferenceResult struct. +func (rrr *ResourceReferenceResult) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var resourceReferenceResultProperties ResourceReferenceResultProperties + err = json.Unmarshal(*v, &resourceReferenceResultProperties) + if err != nil { + return err + } + rrr.ResourceReferenceResultProperties = &resourceReferenceResultProperties + } + } + } + + return nil +} + +// ResourceReferenceResultProperties the result of dns resource reference request. Returns a list of dns +// resource references for each of the azure resource in the request. +type ResourceReferenceResultProperties struct { + // DNSResourceReferences - The result of dns resource reference request. A list of dns resource references for each of the azure resource in the request + DNSResourceReferences *[]ResourceReference `json:"dnsResourceReferences,omitempty"` +} + +// SoaRecord an SOA record. +type SoaRecord struct { + // Host - The domain name of the authoritative name server for this SOA record. + Host *string `json:"host,omitempty"` + // Email - The email contact for this SOA record. + Email *string `json:"email,omitempty"` + // SerialNumber - The serial number for this SOA record. + SerialNumber *int64 `json:"serialNumber,omitempty"` + // RefreshTime - The refresh value for this SOA record. + RefreshTime *int64 `json:"refreshTime,omitempty"` + // RetryTime - The retry time for this SOA record. + RetryTime *int64 `json:"retryTime,omitempty"` + // ExpireTime - The expire time for this SOA record. + ExpireTime *int64 `json:"expireTime,omitempty"` + // MinimumTTL - The minimum value for this SOA record. By convention this is used to determine the negative caching duration. 
+	MinimumTTL *int64 `json:"minimumTTL,omitempty"`
+}
+
+// SrvRecord an SRV record.
+type SrvRecord struct {
+	// Priority - The priority value for this SRV record.
+	Priority *int32 `json:"priority,omitempty"`
+	// Weight - The weight value for this SRV record.
+	Weight *int32 `json:"weight,omitempty"`
+	// Port - The port value for this SRV record.
+	Port *int32 `json:"port,omitempty"`
+	// Target - The target domain name for this SRV record.
+	Target *string `json:"target,omitempty"`
+}
+
+// SubResource a reference to another resource.
+type SubResource struct {
+	// ID - Resource ID.
+	ID *string `json:"id,omitempty"`
+}
+
+// TxtRecord a TXT record.
+type TxtRecord struct {
+	// Value - The text value of this TXT record.
+	Value *[]string `json:"value,omitempty"`
+}
+
+// Zone describes a DNS zone.
+type Zone struct {
+	autorest.Response `json:"-"`
+	// Etag - The etag of the zone.
+	Etag *string `json:"etag,omitempty"`
+	// ZoneProperties - The properties of the zone.
+	*ZoneProperties `json:"properties,omitempty"`
+	// ID - READ-ONLY; Resource ID.
+	ID *string `json:"id,omitempty"`
+	// Name - READ-ONLY; Resource name.
+	Name *string `json:"name,omitempty"`
+	// Type - READ-ONLY; Resource type.
+	Type *string `json:"type,omitempty"`
+	// Location - Resource location.
+	Location *string `json:"location,omitempty"`
+	// Tags - Resource tags.
+	Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for Zone.
+func (z Zone) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if z.Etag != nil {
+		objectMap["etag"] = z.Etag
+	}
+	if z.ZoneProperties != nil {
+		objectMap["properties"] = z.ZoneProperties
+	}
+	if z.Location != nil {
+		objectMap["location"] = z.Location
+	}
+	if z.Tags != nil {
+		objectMap["tags"] = z.Tags
+	}
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for Zone struct.
+func (z *Zone) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "etag":
+			if v != nil {
+				var etag string
+				err = json.Unmarshal(*v, &etag)
+				if err != nil {
+					return err
+				}
+				z.Etag = &etag
+			}
+		case "properties":
+			if v != nil {
+				var zoneProperties ZoneProperties
+				err = json.Unmarshal(*v, &zoneProperties)
+				if err != nil {
+					return err
+				}
+				z.ZoneProperties = &zoneProperties
+			}
+		case "id":
+			if v != nil {
+				var ID string
+				err = json.Unmarshal(*v, &ID)
+				if err != nil {
+					return err
+				}
+				z.ID = &ID
+			}
+		case "name":
+			if v != nil {
+				var name string
+				err = json.Unmarshal(*v, &name)
+				if err != nil {
+					return err
+				}
+				z.Name = &name
+			}
+		case "type":
+			if v != nil {
+				var typeVar string
+				err = json.Unmarshal(*v, &typeVar)
+				if err != nil {
+					return err
+				}
+				z.Type = &typeVar
+			}
+		case "location":
+			if v != nil {
+				var location string
+				err = json.Unmarshal(*v, &location)
+				if err != nil {
+					return err
+				}
+				z.Location = &location
+			}
+		case "tags":
+			if v != nil {
+				var tags map[string]*string
+				err = json.Unmarshal(*v, &tags)
+				if err != nil {
+					return err
+				}
+				z.Tags = tags
+			}
+		}
+	}
+
+	return nil
+}
+
+// ZoneListResult the response to a Zone List or ListAll operation.
+type ZoneListResult struct {
+	autorest.Response `json:"-"`
+	// Value - Information about the DNS zones.
+	Value *[]Zone `json:"value,omitempty"`
+	// NextLink - READ-ONLY; The continuation token for the next page of results.
+ NextLink *string `json:"nextLink,omitempty"` +} + +// ZoneListResultIterator provides access to a complete listing of Zone values. +type ZoneListResultIterator struct { + i int + page ZoneListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ZoneListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZoneListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ZoneListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ZoneListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ZoneListResultIterator) Response() ZoneListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ZoneListResultIterator) Value() Zone { + if !iter.page.NotDone() { + return Zone{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ZoneListResultIterator type. +func NewZoneListResultIterator(page ZoneListResultPage) ZoneListResultIterator { + return ZoneListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (zlr ZoneListResult) IsEmpty() bool { + return zlr.Value == nil || len(*zlr.Value) == 0 +} + +// zoneListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (zlr ZoneListResult) zoneListResultPreparer(ctx context.Context) (*http.Request, error) { + if zlr.NextLink == nil || len(to.String(zlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(zlr.NextLink))) +} + +// ZoneListResultPage contains a page of Zone values. +type ZoneListResultPage struct { + fn func(context.Context, ZoneListResult) (ZoneListResult, error) + zlr ZoneListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ZoneListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZoneListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.zlr) + if err != nil { + return err + } + page.zlr = next + return nil +} + +// Next advances to the next page of values. 
If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ZoneListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ZoneListResultPage) NotDone() bool {
+	return !page.zlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ZoneListResultPage) Response() ZoneListResult {
+	return page.zlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ZoneListResultPage) Values() []Zone {
+	if page.zlr.IsEmpty() {
+		return nil
+	}
+	return *page.zlr.Value
+}
+
+// Creates a new instance of the ZoneListResultPage type.
+func NewZoneListResultPage(getNextPage func(context.Context, ZoneListResult) (ZoneListResult, error)) ZoneListResultPage {
+	return ZoneListResultPage{fn: getNextPage}
+}
+
+// ZoneProperties represents the properties of the zone.
+type ZoneProperties struct {
+	// MaxNumberOfRecordSets - READ-ONLY; The maximum number of record sets that can be created in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
+	MaxNumberOfRecordSets *int64 `json:"maxNumberOfRecordSets,omitempty"`
+	// NumberOfRecordSets - READ-ONLY; The current number of record sets in this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
+	NumberOfRecordSets *int64 `json:"numberOfRecordSets,omitempty"`
+	// NameServers - READ-ONLY; The name servers for this DNS zone. This is a read-only property and any attempt to set this value will be ignored.
+	NameServers *[]string `json:"nameServers,omitempty"`
+	// ZoneType - The type of this DNS zone (Public or Private). Possible values include: 'Public', 'Private'
+	ZoneType ZoneType `json:"zoneType,omitempty"`
+	// RegistrationVirtualNetworks - A list of references to virtual networks that register hostnames in this DNS zone. This applies only when ZoneType is Private.
+	RegistrationVirtualNetworks *[]SubResource `json:"registrationVirtualNetworks,omitempty"`
+	// ResolutionVirtualNetworks - A list of references to virtual networks that resolve records in this DNS zone. This applies only when ZoneType is Private.
+	ResolutionVirtualNetworks *[]SubResource `json:"resolutionVirtualNetworks,omitempty"`
+}
+
+// ZonesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+type ZonesDeleteFuture struct {
+	azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *ZonesDeleteFuture) Result(client ZonesClient) (ar autorest.Response, err error) {
+	var done bool
+	done, err = future.DoneWithContext(context.Background(), client)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "dns.ZonesDeleteFuture", "Result", future.Response(), "Polling failure")
+		return
+	}
+	if !done {
+		err = azure.NewAsyncOpIncompleteError("dns.ZonesDeleteFuture")
+		return
+	}
+	ar.Response = future.Response()
+	return
+}
+
+// ZoneUpdate describes a request to update a DNS zone.
+type ZoneUpdate struct {
+	// Tags - Resource tags.
+	Tags map[string]*string `json:"tags"`
+}
+
+// MarshalJSON is the custom marshaler for ZoneUpdate.
+func (zu ZoneUpdate) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if zu.Tags != nil {
+		objectMap["tags"] = zu.Tags
+	}
+	return json.Marshal(objectMap)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go
new file mode 100644
index 00000000000..a54a79fa17d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go
@@ -0,0 +1,779 @@
+package dns
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// RecordSetsClient is the DNS Management Client.
+type RecordSetsClient struct {
+	BaseClient
+}
+
+// NewRecordSetsClient creates an instance of the RecordSetsClient client.
+func NewRecordSetsClient(subscriptionID string) RecordSetsClient {
+	return NewRecordSetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewRecordSetsClientWithBaseURI creates an instance of the RecordSetsClient client.
+func NewRecordSetsClientWithBaseURI(baseURI string, subscriptionID string) RecordSetsClient {
+	return RecordSetsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a record set within a DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// zoneName - the name of the DNS zone (without a terminating dot).
+// relativeRecordSetName - the name of the record set, relative to the name of the zone.
+// recordType - the type of DNS record in this record set. Record sets of type SOA can be updated but not
+// created (they are created when the DNS zone is created).
+// parameters - parameters supplied to the CreateOrUpdate operation.
+// ifMatch - the etag of the record set. Omit this value to always overwrite the current record set. Specify
+// the last-seen etag value to prevent accidentally overwriting any concurrent changes.
+// ifNoneMatch - set to '*' to allow a new record set to be created, but to prevent updating an existing record
+// set. Other values will be ignored.
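As the preparer below shows, the two etag parameters map onto the HTTP `If-Match` and `If-None-Match` headers. For orientation, here is a minimal sketch of a create-only call against this client. It assumes credentials come from the standard `AZURE_*` environment variables; `<subscription-id>`, `my-rg` and `example.com` are placeholders, and the imports use the canonical `github.com` module paths rather than the mirrored vendor paths above.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Credentials are read from the AZURE_* environment variables.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client := dns.NewRecordSetsClient("<subscription-id>") // placeholder subscription ID
	client.Authorizer = authorizer

	rs := dns.RecordSet{
		RecordSetProperties: &dns.RecordSetProperties{
			TTL:      to.Int64Ptr(300),
			ARecords: &[]dns.ARecord{{Ipv4Address: to.StringPtr("10.0.0.1")}},
		},
	}
	// Empty ifMatch means "overwrite unconditionally"; ifNoneMatch "*" makes the
	// call create-only, so it fails if the record set already exists.
	result, err := client.CreateOrUpdate(context.Background(),
		"my-rg", "example.com", "www", dns.A, rs, "", "*")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created record set:", to.String(result.Name))
}
```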
+func (client RecordSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (result RecordSet, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch, ifNoneMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client RecordSetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "relativeRecordSetName": relativeRecordSetName, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.ID = nil + parameters.Name = nil + parameters.Type = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client RecordSetsClient) CreateOrUpdateResponder(resp *http.Response) (result RecordSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a record set from a DNS zone. This operation cannot be undone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// relativeRecordSetName - the name of the record set, relative to the name of the zone. +// recordType - the type of DNS record in this record set. Record sets of type SOA cannot be deleted (they are +// deleted when the DNS zone is deleted). +// ifMatch - the etag of the record set. Omit this value to always delete the current record set. Specify the +// last-seen etag value to prevent accidentally deleting any concurrent changes. +func (client RecordSetsClient) Delete(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client RecordSetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "relativeRecordSetName": relativeRecordSetName, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client RecordSetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RecordSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a record set. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// relativeRecordSetName - the name of the record set, relative to the name of the zone. +// recordType - the type of DNS record in this record set. +func (client RecordSetsClient) Get(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (result RecordSet, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RecordSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "relativeRecordSetName": relativeRecordSetName, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) 
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) GetResponder(resp *http.Response) (result RecordSet, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllByDNSZone lists all record sets in a DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// zoneName - the name of the DNS zone (without a terminating dot).
+// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets.
+// recordSetNameSuffix - the suffix label of the record set name that has to be used to filter the record set
+// enumerations. If this parameter is specified, Enumeration will return only records that end with
+// .<recordSetNameSuffix>
+func (client RecordSetsClient) ListAllByDNSZone(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordSetNameSuffix string) (result RecordSetListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListAllByDNSZone")
+		defer func() {
+			sc := -1
+			if result.rslr.Response.Response != nil {
+				sc = result.rslr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listAllByDNSZoneNextResults
+	req, err := client.ListAllByDNSZonePreparer(ctx, resourceGroupName, zoneName, top, recordSetNameSuffix)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllByDNSZone", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListAllByDNSZoneSender(req)
+	if err != nil {
+		result.rslr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllByDNSZone", resp, "Failure sending request")
+		return
+	}
+
+	result.rslr, err = client.ListAllByDNSZoneResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllByDNSZone", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllByDNSZonePreparer prepares the ListAllByDNSZone request.
+func (client RecordSetsClient) ListAllByDNSZonePreparer(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordSetNameSuffix string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"zoneName":          autorest.Encode("path", zoneName),
+	}
+
+	const APIVersion = "2018-05-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+	if len(recordSetNameSuffix) > 0 {
+		queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordSetNameSuffix)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/all", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListAllByDNSZoneSender sends the ListAllByDNSZone request.
The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) ListAllByDNSZoneSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListAllByDNSZoneResponder handles the response to the ListAllByDNSZone request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) ListAllByDNSZoneResponder(resp *http.Response) (result RecordSetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listAllByDNSZoneNextResults retrieves the next set of results, if any.
+func (client RecordSetsClient) listAllByDNSZoneNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) {
+	req, err := lastResults.recordSetListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listAllByDNSZoneNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListAllByDNSZoneSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listAllByDNSZoneNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListAllByDNSZoneResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listAllByDNSZoneNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListAllByDNSZoneComplete enumerates all values, automatically crossing page boundaries as required.
+func (client RecordSetsClient) ListAllByDNSZoneComplete(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordSetNameSuffix string) (result RecordSetListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListAllByDNSZone")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.ListAllByDNSZone(ctx, resourceGroupName, zoneName, top, recordSetNameSuffix)
+	return
+}
+
+// ListByDNSZone lists all record sets in a DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// zoneName - the name of the DNS zone (without a terminating dot).
+// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets.
+// recordsetnamesuffix - the suffix label of the record set name that has to be used to filter the record set
+// enumerations. If this parameter is specified, Enumeration will return only records that end with
+// .<recordsetnamesuffix>
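The page type returned by these listing calls, and the iterator built on top of it, hide the `nextLink` continuation handling. A short sketch of draining a zone with the `ListByDNSZoneComplete` variant defined a little further below, under the same placeholder assumptions (`my-rg`, `example.com`) and with a client configured as in the earlier sketch:

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns"
	"github.com/Azure/go-autorest/autorest/to"
)

// iterateRecordSets prints every record set in the zone, letting the iterator
// follow nextLink continuation tokens across pages.
func iterateRecordSets(ctx context.Context, client dns.RecordSetsClient) error {
	// nil top and an empty suffix mean server-side defaults and no name filter.
	iter, err := client.ListByDNSZoneComplete(ctx, "my-rg", "example.com", nil, "")
	if err != nil {
		return err
	}
	for iter.NotDone() {
		rs := iter.Value()
		fmt.Printf("%s\t%s\n", to.String(rs.Name), to.String(rs.Type))
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```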
+func (client RecordSetsClient) ListByDNSZone(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByDNSZone") + defer func() { + sc := -1 + if result.rslr.Response.Response != nil { + sc = result.rslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByDNSZoneNextResults + req, err := client.ListByDNSZonePreparer(ctx, resourceGroupName, zoneName, top, recordsetnamesuffix) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", nil, "Failure preparing request") + return + } + + resp, err := client.ListByDNSZoneSender(req) + if err != nil { + result.rslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure sending request") + return + } + + result.rslr, err = client.ListByDNSZoneResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure responding to request") + } + + return +} + +// ListByDNSZonePreparer prepares the ListByDNSZone request. +func (client RecordSetsClient) ListByDNSZonePreparer(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(recordsetnamesuffix) > 0 { + queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/recordsets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByDNSZoneSender sends the ListByDNSZone request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) ListByDNSZoneSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByDNSZoneResponder handles the response to the ListByDNSZone request. The method always +// closes the http.Response Body. +func (client RecordSetsClient) ListByDNSZoneResponder(resp *http.Response) (result RecordSetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByDNSZoneNextResults retrieves the next set of results, if any. 
+func (client RecordSetsClient) listByDNSZoneNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) {
+	req, err := lastResults.recordSetListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByDNSZoneNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListByDNSZoneSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByDNSZoneNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListByDNSZoneResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByDNSZoneNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListByDNSZoneComplete enumerates all values, automatically crossing page boundaries as required.
+func (client RecordSetsClient) ListByDNSZoneComplete(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByDNSZone")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.ListByDNSZone(ctx, resourceGroupName, zoneName, top, recordsetnamesuffix)
+	return
+}
+
+// ListByType lists the record sets of a specified type in a DNS zone.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// zoneName - the name of the DNS zone (without a terminating dot).
+// recordType - the type of record sets to enumerate.
+// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets.
+// recordsetnamesuffix - the suffix label of the record set name that has to be used to filter the record set
+// enumerations. If this parameter is specified, Enumeration will return only records that end with
+// .<recordsetnamesuffix>
+func (client RecordSetsClient) ListByType(ctx context.Context, resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType")
+		defer func() {
+			sc := -1
+			if result.rslr.Response.Response != nil {
+				sc = result.rslr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listByTypeNextResults
+	req, err := client.ListByTypePreparer(ctx, resourceGroupName, zoneName, recordType, top, recordsetnamesuffix)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListByTypeSender(req)
+	if err != nil {
+		result.rslr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure sending request")
+		return
+	}
+
+	result.rslr, err = client.ListByTypeResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListByTypePreparer prepares the ListByType request.
+func (client RecordSetsClient) ListByTypePreparer(ctx context.Context, resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(recordsetnamesuffix) > 0 { + queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByTypeSender sends the ListByType request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) ListByTypeSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByTypeResponder handles the response to the ListByType request. The method always +// closes the http.Response Body. +func (client RecordSetsClient) ListByTypeResponder(resp *http.Response) (result RecordSetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByTypeNextResults retrieves the next set of results, if any. +func (client RecordSetsClient) listByTypeNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) { + req, err := lastResults.recordSetListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByTypeNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByTypeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByTypeNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByTypeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByTypeNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByTypeComplete enumerates all values, automatically crossing page boundaries as required. 
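Alongside the `Complete` iterator named just above, the page type can also be walked by hand, which makes the `$top` page size and per-page processing explicit. A sketch restricted to TXT records, again with `my-rg` and `example.com` as stand-ins:

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns"
	"github.com/Azure/go-autorest/autorest/to"
)

// listTXTRecordSets walks pages of TXT record sets by hand, twenty per page.
func listTXTRecordSets(ctx context.Context, client dns.RecordSetsClient) error {
	page, err := client.ListByType(ctx, "my-rg", "example.com", dns.TXT, to.Int32Ptr(20), "")
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, rs := range page.Values() {
			fmt.Println(to.String(rs.Name))
		}
		if err := page.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```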
+func (client RecordSetsClient) ListByTypeComplete(ctx context.Context, resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByType(ctx, resourceGroupName, zoneName, recordType, top, recordsetnamesuffix) + return +} + +// Update updates a record set within a DNS zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// relativeRecordSetName - the name of the record set, relative to the name of the zone. +// recordType - the type of DNS record in this record set. +// parameters - parameters supplied to the Update operation. +// ifMatch - the etag of the record set. Omit this value to always overwrite the current record set. Specify +// the last-seen etag value to prevent accidentally overwriting concurrent changes. +func (client RecordSetsClient) Update(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string) (result RecordSet, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client RecordSetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "relativeRecordSetName": relativeRecordSetName, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.ID = nil + parameters.Name = nil + parameters.Type = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client RecordSetsClient) UpdateResponder(resp *http.Response) (result RecordSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go new file mode 100644 index 00000000000..35f01ca41b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go @@ -0,0 +1,117 @@ +package dns + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
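The file that follows adds a reverse lookup: given Azure resource IDs, `GetByTargetResources` returns the DNS record sets that reference them. A hedged sketch of how it might be called; the helper name and the resource IDs passed in are illustrative only, and the client is assumed to be configured with an authorizer as in the first sketch:

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns"
	"github.com/Azure/go-autorest/autorest/to"
)

// findReferencingRecords asks which DNS records point at the given resource IDs.
func findReferencingRecords(ctx context.Context, client dns.ResourceReferenceClient, targetIDs []string) error {
	targets := make([]dns.SubResource, 0, len(targetIDs))
	for _, id := range targetIDs {
		targets = append(targets, dns.SubResource{ID: to.StringPtr(id)})
	}
	res, err := client.GetByTargetResources(ctx, dns.ResourceReferenceRequest{
		ResourceReferenceRequestProperties: &dns.ResourceReferenceRequestProperties{
			TargetResources: &targets,
		},
	})
	if err != nil {
		return err
	}
	// Guard the embedded properties pointer before using its promoted fields.
	if res.ResourceReferenceResultProperties == nil || res.DNSResourceReferences == nil {
		return nil
	}
	for _, ref := range *res.DNSResourceReferences {
		if ref.TargetResource != nil && ref.DNSResources != nil {
			fmt.Printf("%s is referenced by %d record set(s)\n",
				to.String(ref.TargetResource.ID), len(*ref.DNSResources))
		}
	}
	return nil
}
```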
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// ResourceReferenceClient is the DNS Management Client.
+type ResourceReferenceClient struct {
+	BaseClient
+}
+
+// NewResourceReferenceClient creates an instance of the ResourceReferenceClient client.
+func NewResourceReferenceClient(subscriptionID string) ResourceReferenceClient {
+	return NewResourceReferenceClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewResourceReferenceClientWithBaseURI creates an instance of the ResourceReferenceClient client.
+func NewResourceReferenceClientWithBaseURI(baseURI string, subscriptionID string) ResourceReferenceClient {
+	return ResourceReferenceClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// GetByTargetResources returns the DNS records specified by the referencing targetResourceIds.
+// Parameters:
+// parameters - properties for the DNS resource reference request.
+func (client ResourceReferenceClient) GetByTargetResources(ctx context.Context, parameters ResourceReferenceRequest) (result ResourceReferenceResult, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ResourceReferenceClient.GetByTargetResources")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.GetByTargetResourcesPreparer(ctx, parameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "dns.ResourceReferenceClient", "GetByTargetResources", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.GetByTargetResourcesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "dns.ResourceReferenceClient", "GetByTargetResources", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.GetByTargetResourcesResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "dns.ResourceReferenceClient", "GetByTargetResources", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetByTargetResourcesPreparer prepares the GetByTargetResources request.
+func (client ResourceReferenceClient) GetByTargetResourcesPreparer(ctx context.Context, parameters ResourceReferenceRequest) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-05-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/getDnsResourceReference", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetByTargetResourcesSender sends the GetByTargetResources request. The method will close the
+// http.Response Body if it receives an error.
+func (client ResourceReferenceClient) GetByTargetResourcesSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetByTargetResourcesResponder handles the response to the GetByTargetResources request. The method always
+// closes the http.Response Body.
+func (client ResourceReferenceClient) GetByTargetResourcesResponder(resp *http.Response) (result ResourceReferenceResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go
new file mode 100644
index 00000000000..7762623de8c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go
@@ -0,0 +1,30 @@
+package dns
+
+import "github.com/Azure/azure-sdk-for-go/version"
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+	return "Azure-SDK-For-Go/" + version.Number + " dns/2018-05-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+	return version.Number
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go
new file mode 100644
index 00000000000..de1ede07511
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go
@@ -0,0 +1,611 @@
+package dns
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// ZonesClient is the DNS Management Client.
+type ZonesClient struct { + BaseClient +} + +// NewZonesClient creates an instance of the ZonesClient client. +func NewZonesClient(subscriptionID string) ZonesClient { + return NewZonesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewZonesClientWithBaseURI creates an instance of the ZonesClient client. +func NewZonesClientWithBaseURI(baseURI string, subscriptionID string) ZonesClient { + return ZonesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a DNS zone. Does not modify DNS records within the zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// parameters - parameters supplied to the CreateOrUpdate operation. +// ifMatch - the etag of the DNS zone. Omit this value to always overwrite the current zone. Specify the +// last-seen etag value to prevent accidentally overwriting any concurrent changes. +// ifNoneMatch - set to '*' to allow a new DNS zone to be created, but to prevent updating an existing zone. +// Other values will be ignored. +func (client ZonesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (result Zone, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, zoneName, parameters, ifMatch, ifNoneMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ZonesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ZonesClient) CreateOrUpdateResponder(resp *http.Response) (result Zone, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a DNS zone. WARNING: All DNS records in the zone will also be deleted. This operation cannot be +// undone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// ifMatch - the etag of the DNS zone. Omit this value to always delete the current zone. Specify the last-seen +// etag value to prevent accidentally deleting any concurrent changes. +func (client ZonesClient) Delete(ctx context.Context, resourceGroupName string, zoneName string, ifMatch string) (result ZonesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, zoneName, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ZonesClient) DeletePreparer(ctx context.Context, resourceGroupName string, zoneName string, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) DeleteSender(req *http.Request) (future ZonesDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ZonesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a DNS zone. Retrieves the zone properties, but not the record sets within the zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +func (client ZonesClient) Get(ctx context.Context, resourceGroupName string, zoneName string) (result Zone, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, zoneName) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ZonesClient) GetPreparer(ctx context.Context, resourceGroupName string, zoneName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ZonesClient) GetResponder(resp *http.Response) (result Zone, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the DNS zones in all resource groups in a subscription. +// Parameters: +// top - the maximum number of DNS zones to return. If not specified, returns up to 100 zones. +func (client ZonesClient) List(ctx context.Context, top *int32) (result ZoneListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.List") + defer func() { + sc := -1 + if result.zlr.Response.Response != nil { + sc = result.zlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, top) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.zlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure sending request") + return + } + + result.zlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ZonesClient) ListPreparer(ctx context.Context, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/dnszones", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ZonesClient) ListResponder(resp *http.Response) (result ZoneListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ZonesClient) listNextResults(ctx context.Context, lastResults ZoneListResult) (result ZoneListResult, err error) { + req, err := lastResults.zoneListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ZonesClient) ListComplete(ctx context.Context, top *int32) (result ZoneListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, top) + return +} + +// ListByResourceGroup lists the DNS zones within a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets. 
+func (client ZonesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, top *int32) (result ZoneListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.zlr.Response.Response != nil { + sc = result.zlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, top) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.zlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.zlr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client ZonesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ZonesClient) ListByResourceGroupResponder(resp *http.Response) (result ZoneListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client ZonesClient) listByResourceGroupNextResults(ctx context.Context, lastResults ZoneListResult) (result ZoneListResult, err error) { + req, err := lastResults.zoneListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client ZonesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, top *int32) (result ZoneListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, top) + return +} + +// Update updates a DNS zone. Does not modify DNS records within the zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// parameters - parameters supplied to the Update operation. +// ifMatch - the etag of the DNS zone. Omit this value to always overwrite the current zone. Specify the +// last-seen etag value to prevent accidentally overwriting any concurrent changes. +func (client ZonesClient) Update(ctx context.Context, resourceGroupName string, zoneName string, parameters ZoneUpdate, ifMatch string) (result Zone, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, zoneName, parameters, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client ZonesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, parameters ZoneUpdate, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client ZonesClient) UpdateResponder(resp *http.Response) (result Zone, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go new file mode 100644 index 00000000000..b7139293081 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -0,0 +1,21 @@ +package version + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Number contains the semantic version of this SDK. 
+const Number = "v32.6.0" diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Microsoft Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
new file mode 100644
index 00000000000..fec416a9c41
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -0,0 +1,292 @@
+# Azure Active Directory authentication for Go
+
+This is a standalone package for authenticating with Azure Active
+Directory from other Go libraries and applications, in particular the [Azure SDK
+for Go](https://github.com/Azure/azure-sdk-for-go).
+
+Note: Despite the package's name it is not related to other "ADAL" libraries
+maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
+should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
+or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
+trackers.
+
+## Install
+
+```bash
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --key-usage Verify --end-date 2018-01-01 \
+      --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace the `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+    // This is called after the token is acquired
+    return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace the `TENANT_ID` with your tenant ID.
+* Replace the `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+    *oauthConfig,
+    applicationID,
+    applicationSecret,
+    resource,
+    callbacks...)
+if err != nil {
+    return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+    return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from the pfx file
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+    return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+    *oauthConfig,
+    applicationID,
+    certificate,
+    rsaPrivateKey,
+    resource,
+    callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file which was created in the previous section.
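+
+#### Using the acquired token
+
+The `ServicePrincipalToken` produced by any of the flows above is usually not consumed
+directly; it is wrapped in a bearer authorizer and attached to an SDK client. The snippet
+below is a minimal sketch, assuming `spt` was created by one of the flows above; the DNS
+`ZonesClient` and its import path are only an example target, not something this README
+prescribes.
+
+```Go
+import (
+    "context"
+    "fmt"
+
+    "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns"
+    "github.com/Azure/go-autorest/autorest"
+)
+
+// spt is an *adal.ServicePrincipalToken obtained from one of the flows above.
+zonesClient := dns.NewZonesClient("SUBSCRIPTION_ID")
+
+// The bearer authorizer adds "Authorization: Bearer <token>" to each request
+// and asks the token's refresher to keep the token fresh before it expires.
+zonesClient.Authorizer = autorest.NewBearerAuthorizer(spt)
+
+zone, err := zonesClient.Get(context.Background(), "RESOURCE_GROUP", "example.org")
+if err == nil && zone.Name != nil {
+    fmt.Println("retrieved zone:", *zone.Name)
+}
+```
+
+* Replace `SUBSCRIPTION_ID` and `RESOURCE_GROUP` with values from your Azure subscription.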
+
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+    oauthClient,
+    *oauthConfig,
+    applicationID,
+    resource)
+if err != nil {
+    return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+    return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+    *oauthConfig,
+    applicationID,
+    resource,
+    *token,
+    callbacks...)
+
+if err == nil {
+    token := spt.Token
+}
+```
+
+#### Username and Password
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
+    *oauthConfig,
+    applicationID,
+    username,
+    password,
+    resource,
+    callbacks...)
+
+if err == nil {
+    token := spt.Token
+}
+```
+
+#### Authorization Code
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
+    *oauthConfig,
+    applicationID,
+    clientSecret,
+    authorizationCode,
+    redirectURI,
+    resource,
+    callbacks...)
+
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+        application id
+  -certificatePath string
+        path to pkcs12/PFX application certificate
+  -mode string
+        authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+        resource for which the token is requested
+  -secret string
+        application secret
+  -tenantId string
+        tenant id
+  -tokenCachePath string
+        location of oauth token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 00000000000..fa5964742fc
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,151 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "errors"
+    "fmt"
+    "net/url"
+)
+
+const (
+    activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
+// OAuthConfig represents the endpoints needed
+// in OAuth operations
+type OAuthConfig struct {
+    AuthorityEndpoint  url.URL `json:"authorityEndpoint"`
+    AuthorizeEndpoint  url.URL `json:"authorizeEndpoint"`
+    TokenEndpoint      url.URL `json:"tokenEndpoint"`
+    DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
+}
+
+// IsZero returns true if the OAuthConfig object is zero-initialized.
+func (oac OAuthConfig) IsZero() bool {
+    return oac == OAuthConfig{}
+}
+
+func validateStringParam(param, name string) error {
+    if len(param) == 0 {
+        return fmt.Errorf("parameter '" + name + "' cannot be empty")
+    }
+    return nil
+}
+
+// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs
+func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+    apiVer := "1.0"
+    return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
+}
+
+// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs.
+// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
+    if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
+        return nil, err
+    }
+    api := ""
+    // it's legal for tenantID to be empty so don't validate it
+    if apiVersion != nil {
+        if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+            return nil, err
+        }
+        api = fmt.Sprintf("?api-version=%s", *apiVersion)
+    }
+    u, err := url.Parse(activeDirectoryEndpoint)
+    if err != nil {
+        return nil, err
+    }
+    authorityURL, err := u.Parse(tenantID)
+    if err != nil {
+        return nil, err
+    }
+    authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
+    if err != nil {
+        return nil, err
+    }
+    tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
+    if err != nil {
+        return nil, err
+    }
+    deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
+    if err != nil {
+        return nil, err
+    }
+
+    return &OAuthConfig{
+        AuthorityEndpoint:  *authorityURL,
+        AuthorizeEndpoint:  *authorizeURL,
+        TokenEndpoint:      *tokenURL,
+        DeviceCodeEndpoint: *deviceCodeURL,
+    }, nil
+}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+    PrimaryTenant() *OAuthConfig
+    AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+    APIVersion string
+}
+
+func (c OAuthOptions) apiVersion() string {
+    if c.APIVersion != "" {
+        return fmt.Sprintf("?api-version=%s", c.APIVersion)
+    }
+    return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 00000000000..914f8af5e4e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,269 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/* + This file is largely based on rjw57/oauth2device's code, with the follow differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + logPrefix = "autorest/adal/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string `json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// DeviceToken is the object return by the token exchange 
endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +// Deprecated: use InitiateDeviceAuthWithContext() instead. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource) +} + +// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +// Deprecated: use CheckForUserCompletionWithContext() instead. 
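Taken together, the initiate and wait helpers form a complete device-code flow. A minimal sketch, with placeholder endpoint, tenant, client, and resource values and abbreviated error handling:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	ctx := context.Background()
	// error handling elided for the config; all strings are placeholders
	cfg, _ := adal.NewOAuthConfig("https://login.microsoftonline.com/", "tenant-id")
	code, err := adal.InitiateDeviceAuthWithContext(ctx, http.DefaultClient, *cfg,
		"client-id", "https://management.azure.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*code.Message) // Azure includes a human-readable sign-in prompt
	token, err := adal.WaitForUserCompletionWithContext(ctx, http.DefaultClient, code)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(token.AccessToken != "")
}
```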
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return CheckForUserCompletionWithContext(context.Background(), sender, code) +} + +// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +// Deprecated: use WaitForUserCompletionWithContext() instead. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return WaitForUserCompletionWithContext(context.Background(), sender, code) +} + +// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error +// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletionWithContext(ctx, sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. 
Server told us to slow_down too much", logPrefix) + } + + select { + case <-time.After(waitDuration): + // noop + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 00000000000..fdc5b90ca5c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,12 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.0 + github.com/Azure/go-autorest/autorest/date v0.2.0 + github.com/Azure/go-autorest/autorest/mocks v0.3.0 + github.com/Azure/go-autorest/tracing v0.5.0 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 00000000000..f0a018563b5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,23 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a 
h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
new file mode 100644
index 00000000000..28a4bfc4c43
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest/autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
new file mode 100644
index 00000000000..9e15f2751f2
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
@@ -0,0 +1,73 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// LoadToken restores a Token object from a file located at 'path'.
+func LoadToken(path string) (*Token, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+	}
+	defer file.Close()
+
+	var token Token
+
+	dec := json.NewDecoder(file)
+	if err = dec.Decode(&token); err != nil {
+		return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+	}
+	return &token, nil
+}
+
+// SaveToken persists an oauth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
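A round-trip sketch for the two persistence helpers in this file (the path below is a placeholder; SaveToken's implementation follows):

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	var tok adal.Token // in practice, obtained from a ServicePrincipalToken
	if err := adal.SaveToken("/tmp/adal-token.json", 0600, tok); err != nil {
		fmt.Println(err)
		return
	}
	restored, err := adal.LoadToken("/tmp/adal-token.json")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(restored.IsZero())
}
```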
+func SaveToken(path string, mode os.FileMode, token Token) error {
+	dir := filepath.Dir(path)
+	err := os.MkdirAll(dir, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+	}
+
+	newFile, err := ioutil.TempFile(dir, "token")
+	if err != nil {
+		return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+	}
+	tempPath := newFile.Name()
+
+	if err := json.NewEncoder(newFile).Encode(token); err != nil {
+		return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+	}
+	if err := newFile.Close(); err != nil {
+		return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+	}
+
+	// Atomic replace to avoid multi-writer file corruptions
+	if err := os.Rename(tempPath, path); err != nil {
+		return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+	}
+	if err := os.Chmod(path, mode); err != nil {
+		return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 00000000000..d7e4372bbc5
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,95 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/http/cookiejar"
+	"sync"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+const (
+	contentType      = "Content-Type"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+func sender() Sender {
+	// note that we can't init defaultSender in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenderInit.Do(func() {
+		// Use behaviour compatible with DefaultTransport, but require a minimum TLS version.
+		defaultTransport := http.DefaultTransport.(*http.Transport)
+		transport := &http.Transport{
+			Proxy:                 defaultTransport.Proxy,
+			DialContext:           defaultTransport.DialContext,
+			MaxIdleConns:          defaultTransport.MaxIdleConns,
+			IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+			TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+			ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+			TLSClientConfig: &tls.Config{
+				MinVersion: tls.VersionTLS12,
+			},
+		}
+		var roundTripper http.RoundTripper = transport
+		if tracing.IsEnabled() {
+			roundTripper = tracing.NewTransport(transport)
+		}
+		j, _ := cookiejar.New(nil)
+		defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+	})
+	return defaultSender
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 00000000000..7c7fca3718f
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,1112 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
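Before token.go begins, a sketch of how the Sender pipeline in sender.go above can be extended with a custom pre-decorator; the logging decorator is illustrative, not part of the package:

```go
package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

// logRequests is a pre-decorator: it inspects the request, then passes it along.
func logRequests(s adal.Sender) adal.Sender {
	return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
		log.Printf("adal: %s %s", r.Method, r.URL)
		return s.Do(r)
	})
}

func main() {
	client := adal.CreateSender(logRequests)
	_ = client // e.g. pass via ServicePrincipalToken.SetSender to trace refresh traffic
}
```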
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/dgrijalva/jwt-go"
+)
+
+const (
+	defaultRefresh = 5 * time.Minute
+
+	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+	OAuthGrantTypeDeviceCode = "device_code"
+
+	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+	OAuthGrantTypeClientCredentials = "client_credentials"
+
+	// OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+	OAuthGrantTypeUserPass = "password"
+
+	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+	OAuthGrantTypeRefreshToken = "refresh_token"
+
+	// OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+	OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+	// metadataHeader is the header required by MSI extension
+	metadataHeader = "Metadata"
+
+	// msiEndpoint is the well-known endpoint for getting MSI authentication tokens
+	msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+
+	// the default number of attempts to refresh an MSI authentication token
+	defaultMaxMSIRefreshAttempts = 5
+
+	// asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
+	asMSIEndpointEnv = "MSI_ENDPOINT"
+
+	// asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions
+	asMSISecretEnv = "MSI_SECRET"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+	OAuthToken() string
+}
+
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+	PrimaryOAuthToken() string
+	AuxiliaryOAuthTokens() []string
+}
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
+type TokenRefreshError interface {
+	error
+	Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+	Refresh() error
+	RefreshExchange(resource string) error
+	EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality
+type RefresherWithContext interface {
+	RefreshWithContext(ctx context.Context) error
+	RefreshExchangeWithContext(ctx context.Context, resource string) error
+	EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// Token encapsulates the access token used to authorize Azure requests. 
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
+type Token struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+
+	ExpiresIn json.Number `json:"expires_in"`
+	ExpiresOn json.Number `json:"expires_on"`
+	NotBefore json.Number `json:"not_before"`
+
+	Resource string `json:"resource"`
+	Type     string `json:"token_type"`
+}
+
+func newToken() Token {
+	return Token{
+		ExpiresIn: "0",
+		ExpiresOn: "0",
+		NotBefore: "0",
+	}
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+	return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+	s, err := t.ExpiresOn.Float64()
+	if err != nil {
+		s = -3600
+	}
+
+	expiration := date.NewUnixTimeFromSeconds(s)
+
+	return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+	return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+	return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token.
+func (t *Token) OAuthToken() string {
+	return t.AccessToken
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an oAuth token.
+type ServicePrincipalSecret interface {
+	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. This is used by the manual token constructors.
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It only returns an error for the ServicePrincipalNoSecret type
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) {
+	type tokenType struct {
+		Type string `json:"type"`
+	}
+	return json.Marshal(tokenType{
+		Type: "ServicePrincipalNoSecret",
+	})
+}
+
+// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
+type ServicePrincipalTokenSecret struct {
+	ClientSecret string `json:"value"`
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
+func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	v.Set("client_secret", tokenSecret.ClientSecret)
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface. 
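A short sketch of the expiry helpers on Token (the epoch value is a placeholder):

```go
package main

import (
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	tok := adal.Token{ExpiresOn: "1735689600"} // expires_on is epoch seconds; placeholder value
	fmt.Println(tok.Expires().UTC())           // absolute expiry time
	fmt.Println(tok.WillExpireIn(5 * time.Minute))
}
```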
+func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + inner servicePrincipalToken + refreshLock *sync.RWMutex + sender Sender + refreshCallbacks []TokenRefreshCallback + // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. + MaxMSIRefreshAttempts int +} + +// MarshalTokenJSON returns the marshalled inner token. +func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { + return json.Marshal(spt.inner.Token) +} + +// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. +func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { + spt.refreshCallbacks = callbacks +} + +// MarshalJSON implements the json.Marshaler interface. +func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { + return json.Marshal(spt.inner) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
+	// need to determine the token type
+	raw := map[string]interface{}{}
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return err
+	}
+	secret := raw["secret"].(map[string]interface{})
+	switch secret["type"] {
+	case "ServicePrincipalNoSecret":
+		spt.inner.Secret = &ServicePrincipalNoSecret{}
+	case "ServicePrincipalTokenSecret":
+		spt.inner.Secret = &ServicePrincipalTokenSecret{}
+	case "ServicePrincipalCertificateSecret":
+		return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
+	case "ServicePrincipalMSISecret":
+		return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
+	case "ServicePrincipalUsernamePasswordSecret":
+		spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
+	case "ServicePrincipalAuthorizationCodeSecret":
+		spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
+	default:
+		return fmt.Errorf("unrecognized token type '%s'", secret["type"])
+	}
+	err = json.Unmarshal(data, &spt.inner)
+	if err != nil {
+		return err
+	}
+	// Don't override the refreshLock or the sender if those have already been set.
+	if spt.refreshLock == nil {
+		spt.refreshLock = &sync.RWMutex{}
+	}
+	if spt.sender == nil {
+		spt.sender = sender()
+	}
+	return nil
+}
+
+// internal type used for marshalling/unmarshalling
+type servicePrincipalToken struct {
+	Token         Token                  `json:"token"`
+	Secret        ServicePrincipalSecret `json:"secret"`
+	OauthConfig   OAuthConfig            `json:"oauth"`
+	ClientID      string                 `json:"clientID"`
+	Resource      string                 `json:"resource"`
+	AutoRefresh   bool                   `json:"autoRefresh"`
+	RefreshWithin time.Duration          `json:"refreshWithin"`
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+	if oac.IsZero() {
+		return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+	}
+	return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. 
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
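A client-credentials sketch built on the constructor below; all string literals are placeholders and error handling is abbreviated:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// error handling elided for the config; all strings are placeholders
	cfg, _ := adal.NewOAuthConfig("https://login.microsoftonline.com/", "tenant-id")
	spt, err := adal.NewServicePrincipalToken(*cfg, "client-id", "client-secret",
		"https://management.azure.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := spt.Refresh(); err != nil { // obtains the initial token
		fmt.Println(err)
		return
	}
	fmt.Println(spt.OAuthToken() != "")
}
```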
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(username, "username"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(password, "password"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalUsernamePasswordSecret{
+			Username: username,
+			Password: password,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalAuthorizationCodeSecret{
+			ClientSecret:      clientSecret,
+			AuthorizationCode: authorizationCode,
+			RedirectURI:       redirectURI,
+		},
+		callbacks...,
+	)
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+func GetMSIVMEndpoint() (string, error) {
+	return msiEndpoint, nil
+}
+
+func isAppService() bool {
+	_, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
+	_, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv)
+
+	return asMSIEndpointEnvExists && asMSISecretEnvExists
+}
+
+// GetMSIAppServiceEndpoint gets the MSI endpoint for App Service and Functions.
+func GetMSIAppServiceEndpoint() (string, error) {
+	asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
+
+	if asMSIEndpointEnvExists {
+		return asMSIEndpoint, nil
+	}
+	return "", errors.New("MSI endpoint not found")
+}
+
+// GetMSIEndpoint gets the appropriate MSI endpoint depending on the runtime environment.
+func GetMSIEndpoint() (string, error) {
+	if isAppService() {
+		return GetMSIAppServiceEndpoint()
+	}
+	return GetMSIVMEndpoint()
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the specified user assigned identity when creating the token. 
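An MSI sketch combining the endpoint discovery helpers with the constructor above (the resource URI is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	msiEndpoint, _ := adal.GetMSIEndpoint() // VM or App Service, decided by environment
	spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, "https://management.azure.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(spt != nil) // refresh on first use, or call spt.Refresh() eagerly
}
```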
+func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...) +} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != nil { + if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { + return nil, err + } + } + // We set the oauth config token endpoint to be MSI's endpoint + msiEndpointURL, err := url.Parse(msiEndpoint) + if err != nil { + return nil, err + } + + v := url.Values{} + v.Set("resource", resource) + // App Service MSI currently only supports token API version 2017-09-01 + if isAppService() { + v.Set("api-version", "2017-09-01") + } else { + v.Set("api-version", "2018-02-01") + } + if userAssignedID != nil { + v.Set("client_id", *userAssignedID) + } + msiEndpointURL.RawQuery = v.Encode() + + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + Secret: &ServicePrincipalMSISecret{}, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, + } + + if userAssignedID != nil { + spt.inner.ClientID = *userAssignedID + } + + return spt, nil +} + +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. +func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. 
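The intended call pattern for the refresh helpers below, as a small sketch; authorize is a hypothetical helper, not part of the package:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

// authorize refreshes spt only when it is inside the refresh window,
// then attaches the bearer token to req.
func authorize(ctx context.Context, spt *adal.ServicePrincipalToken, req *http.Request) error {
	if err := spt.EnsureFreshWithContext(ctx); err != nil {
		return fmt.Errorf("token refresh failed: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+spt.OAuthToken())
	return nil
}

func main() {} // placeholder so the sketch compiles standalone
```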
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+	if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+		// take the write lock then check to see if the token was already refreshed
+		spt.refreshLock.Lock()
+		defer spt.refreshLock.Unlock()
+		if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+			return spt.refreshInternal(ctx, spt.inner.Resource)
+		}
+	}
+	return nil
+}
+
+// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
+func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
+	if spt.refreshCallbacks != nil {
+		for _, callback := range spt.refreshCallbacks {
+			err := callback(spt.inner.Token)
+			if err != nil {
+				return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
+			}
+		}
+	}
+	return nil
+}
+
+// Refresh obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) Refresh() error {
+	return spt.RefreshWithContext(context.Background())
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, spt.inner.Resource)
+}
+
+// RefreshExchange refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
+	return spt.RefreshExchangeWithContext(context.Background(), resource)
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, resource)
+}
+
+func (spt *ServicePrincipalToken) getGrantType() string {
+	switch spt.inner.Secret.(type) {
+	case *ServicePrincipalUsernamePasswordSecret:
+		return OAuthGrantTypeUserPass
+	case *ServicePrincipalAuthorizationCodeSecret:
+		return OAuthGrantTypeAuthorizationCode
+	default:
+		return OAuthGrantTypeClientCredentials
+	}
+}
+
+func isIMDS(u url.URL) bool {
+	imds, err := url.Parse(msiEndpoint)
+	if err != nil {
+		return false
+	}
+	return (u.Host == imds.Host && u.Path == imds.Path) || isAppService()
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
+	req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
+	if err != nil {
+		return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) + } + req.Header.Add("User-Agent", UserAgent()) + // Add header when runtime is on App Service or Functions + if isAppService() { + asMSISecret, _ := os.LookupEnv(asMSISecretEnv) + req.Header.Add("Secret", asMSISecret) + } + req = req.WithContext(ctx) + if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + v := url.Values{} + v.Set("client_id", spt.inner.ClientID) + v.Set("resource", resource) + + if spt.inner.Token.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.inner.Token.RefreshToken) + // web apps must specify client_secret when refreshing tokens + // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens + if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + } else { + v.Set("grant_type", spt.getGrantType()) + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + req.Body = body + } + + if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { + req.Method = http.MethodGet + req.Header.Set(metadataHeader, "true") + } + + var resp *http.Response + if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) + } else { + resp, err = spt.sender.Do(req) + } + if err != nil { + // don't return a TokenRefreshError here; this will allow retry logic to apply + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. 
Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.inner.Token = token + + return spt.InvokeRefreshCallbacks(token) +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + http.StatusNetworkAuthenticationRequired) + + // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + for attempt < maxAttempts { + resp, err = sender.Do(req) + // we want to retry if err is not nil or the status code is in the list of retry codes + if err == nil && !responseHasStatusCode(resp, retries...) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. + attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +func responseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp != nil { + for _, i := range codes { + if i == resp.StatusCode { + return true + } + } + } + return false +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.inner.AutoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.inner.RefreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } + +// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. +func (spt *ServicePrincipalToken) OAuthToken() string { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token.OAuthToken() +} + +// Token returns a copy of the current token. +func (spt *ServicePrincipalToken) Token() Token { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token +} + +// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. +type MultiTenantServicePrincipalToken struct { + PrimaryToken *ServicePrincipalToken + AuxiliaryTokens []*ServicePrincipalToken +} + +// PrimaryOAuthToken returns the primary authorization token. 
+func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { + return mt.PrimaryToken.OAuthToken() +} + +// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. +func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { + tokens := make([]string, len(mt.AuxiliaryTokens)) + for i := range mt.AuxiliaryTokens { + tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() + } + return tokens +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %v", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %v", err) + } + } + return nil +} + +// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. 
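A multi-tenant sketch tying the config from config.go to the constructor below; identifiers are placeholders, and the auxiliary tokens would typically feed a header such as x-ms-authorization-auxiliary:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// error handling elided for the config; all strings are placeholders
	mtCfg, _ := adal.NewMultiTenantOAuthConfig("https://login.microsoftonline.com/",
		"primary-tenant-id", []string{"aux-tenant-id"}, adal.OAuthOptions{})
	mt, err := adal.NewMultiTenantServicePrincipalToken(mtCfg, "client-id", "client-secret",
		"https://management.azure.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := mt.RefreshWithContext(context.Background()); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(mt.PrimaryOAuthToken() != "")
	fmt.Println(len(mt.AuxiliaryOAuthTokens()))
}
```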
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 00000000000..c867b348439 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 00000000000..54e87b5b648 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,336 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"crypto/tls"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+)
+
+const (
+	bearerChallengeHeader       = "Www-Authenticate"
+	bearer                      = "Bearer"
+	tenantID                    = "tenantID"
+	apiKeyAuthorizerHeader      = "Ocp-Apim-Subscription-Key"
+	bingAPISdkHeader            = "X-BingApis-SDK-Client"
+	golangBingAPISdkHeaderValue = "Go-SDK"
+	authorization               = "Authorization"
+	basic                       = "Basic"
+)
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+	headers         map[string]interface{}
+	queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with the given headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+	}
+}
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+	subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription-key
+// and Bing SDK headers.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements bearer authorization.
+type BearerAuthorizer struct {
+	tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+	return &BearerAuthorizer{tokenProvider: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
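+//
+// Illustrative wiring (a sketch; the service-principal values are placeholders):
+//
+//	spt, _ := adal.NewServicePrincipalToken(*oauthCfg, "client-id", "client-secret", resource)
+//	ba := autorest.NewBearerAuthorizer(spt)
+//	req, _ := autorest.Prepare(&http.Request{}, ba.WithAuthorization())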
+// +// By default, the token will be automatically refreshed through the Refresher interface. +func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if s == nil { + s = sender(tls.RenegotiateNever) + } + return &BearerAuthorizerCallback{sender: s, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
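+				// (the stripped-down copy is only sent below to probe for a 401
+				// bearer challenge, so its body is never needed)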
+				rCopy := *r
+				removeRequestBody(&rCopy)
+
+				resp, err := bacb.sender.Do(&rCopy)
+				if err == nil && resp.StatusCode == 401 {
+					defer resp.Body.Close()
+					if hasBearerChallenge(resp) {
+						bc, err := newBearerChallenge(resp)
+						if err != nil {
+							return r, err
+						}
+						if bacb.callback != nil {
+							ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
+							if err != nil {
+								return r, err
+							}
+							return Prepare(r, ba.WithAuthorization())
+						}
+					}
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// returns true if the HTTP response contains a bearer challenge
+func hasBearerChallenge(resp *http.Response) bool {
+	authHeader := resp.Header.Get(bearerChallengeHeader)
+	if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
+		return false
+	}
+	return true
+}
+
+type bearerChallenge struct {
+	values map[string]string
+}
+
+func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
+	challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
+	trimmedChallenge := challenge[len(bearer)+1:]
+
+	// challenge is a set of key=value pairs that are comma delimited
+	pairs := strings.Split(trimmedChallenge, ",")
+	if len(pairs) < 1 {
+		err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
+		return bc, err
+	}
+
+	bc.values = make(map[string]string)
+	for i := range pairs {
+		trimmedPair := strings.TrimSpace(pairs[i])
+		pair := strings.Split(trimmedPair, "=")
+		if len(pair) == 2 {
+			// remove the enclosing quotes
+			key := strings.Trim(pair[0], "\"")
+			value := strings.Trim(pair[1], "\"")
+
+			switch key {
+			case "authorization", "authorization_uri":
+				// strip the tenant ID from the authorization URL
+				asURL, err := url.Parse(value)
+				if err != nil {
+					return bc, err
+				}
+				bc.values[tenantID] = asURL.Path[1:]
+			default:
+				bc.values[key] = value
+			}
+		}
+	}
+
+	return bc, err
+}
+
+// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
+type EventGridKeyAuthorizer struct {
+	topicKey string
+}
+
+// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
+// with the specified topic key.
+func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
+	return EventGridKeyAuthorizer{topicKey: topicKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
+func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := map[string]interface{}{
+		"aeg-sas-key": egta.topicKey,
+	}
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
+// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
+type BasicAuthorizer struct {
+	userName string
+	password string
+}
+
+// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
+	return &BasicAuthorizer{
+		userName: userName,
+		password: password,
+	}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Basic " followed by the base64-encoded username:password tuple.
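+//
+// For illustration, the pair "user"/"pass" produces the header value
+// "Basic dXNlcjpwYXNz", since base64("user:pass") == "dXNlcjpwYXNz".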
+func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
+type MultiTenantServicePrincipalTokenAuthorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NewMultiTenantServicePrincipalTokenAuthorizer creates a multi-tenant authorizer using the given token provider.
+func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
+	return &multiTenantSPTAuthorizer{tp: tp}
+}
+
+type multiTenantSPTAuthorizer struct {
+	tp adal.MultitenantOAuthTokenProvider
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
+// primary token along with the auxiliary authorization header using the auxiliary tokens.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
+				err = refresher.EnsureFreshWithContext(r.Context())
+				if err != nil {
+					var resp *http.Response
+					if tokError, ok := err.(adal.TokenRefreshError); ok {
+						resp = tokError.Response()
+					}
+					return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
+						"Failed to refresh one or more Tokens for request to %s", r.URL)
+				}
+			}
+			r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
+			if err != nil {
+				return r, err
+			}
+			auxTokens := mt.tp.AuxiliaryOAuthTokens()
+			for i := range auxTokens {
+				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
+			}
+			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
+		})
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
new file mode 100644
index 00000000000..89a659cb664
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
@@ -0,0 +1,67 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
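+
+// Illustrative use of the SASTokenAuthorizer defined below (a sketch; the token
+// value is a truncated placeholder and blobURL is assumed to name the target resource):
+//
+//	auth, err := autorest.NewSASTokenAuthorizer("sv=2019-02-02&sig=...")
+//	if err != nil {
+//		// handle the malformed-token error
+//	}
+//	req, _ := autorest.Prepare(&http.Request{}, autorest.WithBaseURL(blobURL), auth.WithAuthorization())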
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// SASTokenAuthorizer implements an authorization for SAS Token Authentication;
+// it can be used for interaction with Blob Storage Endpoints.
+type SASTokenAuthorizer struct {
+	sasToken string
+}
+
+// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials.
+func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) {
+	if strings.TrimSpace(sasToken) == "" {
+		return nil, fmt.Errorf("sasToken cannot be empty")
+	}
+
+	token := sasToken
+	if strings.HasPrefix(sasToken, "?") {
+		token = strings.TrimPrefix(sasToken, "?")
+	}
+
+	return &SASTokenAuthorizer{
+		sasToken: token,
+	}, nil
+}
+
+// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the
+// URI's query parameters. This can be used for the Blob, Queue, and File Services.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature
+func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+
+			if r.URL.RawQuery != "" {
+				r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
+			} else {
+				r.URL.RawQuery = sas.sasToken
+			}
+
+			r.RequestURI = r.URL.String()
+			return Prepare(r)
+		})
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
new file mode 100644
index 00000000000..33e5f127017
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
@@ -0,0 +1,301 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+)
+
+// SharedKeyType defines the enumeration for the various shared key types.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types.
+type SharedKeyType string
+
+const (
+	// SharedKey is used to authorize against blobs, files and queues services.
+	SharedKey SharedKeyType = "sharedKey"
+
+	// SharedKeyForTable is used to authorize against the table service.
+	SharedKeyForTable SharedKeyType = "sharedKeyTable"
+
+	// SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for
+	// backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
+	SharedKeyLite SharedKeyType = "sharedKeyLite"
+
+	// SharedKeyLiteForTable is used to authorize against the table service. It's provided for
+	// backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
+	SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
+)
+
+const (
+	headerAccept            = "Accept"
+	headerAcceptCharset     = "Accept-Charset"
+	headerContentEncoding   = "Content-Encoding"
+	headerContentLength     = "Content-Length"
+	headerContentMD5        = "Content-MD5"
+	headerContentLanguage   = "Content-Language"
+	headerIfModifiedSince   = "If-Modified-Since"
+	headerIfMatch           = "If-Match"
+	headerIfNoneMatch       = "If-None-Match"
+	headerIfUnmodifiedSince = "If-Unmodified-Since"
+	headerDate              = "Date"
+	headerXMSDate           = "X-Ms-Date"
+	headerXMSVersion        = "x-ms-version"
+	headerRange             = "Range"
+)
+
+const storageEmulatorAccountName = "devstoreaccount1"
+
+// SharedKeyAuthorizer implements an authorization for Shared Key; it can be used
+// for interaction with Blob, File and Queue Storage Endpoints.
+type SharedKeyAuthorizer struct {
+	accountName string
+	accountKey  []byte
+	keyType     SharedKeyType
+}
+
+// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
+func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) {
+	key, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return nil, fmt.Errorf("malformed storage account key: %v", err)
+	}
+	return &SharedKeyAuthorizer{
+		accountName: accountName,
+		accountKey:  key,
+		keyType:     keyType,
+	}, nil
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "<SharedKeyType>" followed by the computed key.
+// This can be used for the Blob, Queue, and File Services.
+//
+// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
+// You may use Shared Key authorization to authorize a request made against the
+// 2009-09-19 version and later of the Blob and Queue services,
+// and version 2014-02-14 and later of the File services.
+func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+
+			sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType)
+			if err != nil {
+				return r, err
+			}
+			return Prepare(r, WithHeader(headerAuthorization, sk))
+		})
+	}
+}
+
+func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) {
+	canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType)
+	if err != nil {
+		return "", err
+	}
+
+	if req.Header == nil {
+		req.Header = http.Header{}
+	}
+
+	// ensure date is set
+	if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" {
+		date := time.Now().UTC().Format(http.TimeFormat)
+		req.Header.Set(headerXMSDate, date)
+	}
+	canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType)
+	if err != nil {
+		return "", err
+	}
+	return createAuthorizationHeader(accName, accKey, canString, keyType), nil
+}
+
+func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) {
+	errMsg := "buildCanonicalizedResource error: %s"
+	u, err := url.Parse(uri)
+	if err != nil {
+		return "", fmt.Errorf(errMsg, err.Error())
+	}
+
+	cr := bytes.NewBufferString("")
+	if accountName != storageEmulatorAccountName {
+		cr.WriteString("/")
+		cr.WriteString(getCanonicalizedAccountName(accountName))
+	}
+
+	if len(u.Path) > 0 {
+		// Any portion of the CanonicalizedResource string that is derived from
+		// the resource's URI should be encoded exactly as it is in the URI.
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if keyType == SharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func getCanonicalizedAccountName(accountName string) string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + date := headers.Get(headerDate) + if v := headers.Get(headerXMSDate); v != "" { + if keyType == SharedKey || keyType == SharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch keyType { + case SharedKey: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + canonicalizedResource, + }, "\n") + case SharedKeyLite: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("key type '%s' is not supported", keyType) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := make(map[string]string) + + for k := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = headers.Get(k) + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, 
keyType SharedKeyType) string { + h := hmac.New(sha256.New, accountKey) + h.Write([]byte(canonicalizedString)) + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + var key string + switch keyType { + case SharedKey, SharedKeyForTable: + key = "SharedKey" + case SharedKeyLite, SharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 00000000000..aafdf021fd6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. +func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 00000000000..1cb41cbeb1b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,924 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + pt pollingTracker +} + +// NewFutureFromResponse returns a new Future object initialized +// with the initial response from an asynchronous operation. +func NewFutureFromResponse(resp *http.Response) (Future, error) { + pt, err := createPollingTracker(resp) + return Future{pt: pt}, err +} + +// Response returns the last HTTP response. +func (f Future) Response() *http.Response { + if f.pt == nil { + return nil + } + return f.pt.latestResponse() +} + +// Status returns the last status message of the operation. +func (f Future) Status() string { + if f.pt == nil { + return "" + } + return f.pt.pollingStatus() +} + +// PollingMethod returns the method used to monitor the status of the asynchronous operation. +func (f Future) PollingMethod() PollingMethodType { + if f.pt == nil { + return PollingUnknown + } + return f.pt.pollingMethod() +} + +// DoneWithContext queries the service to see if the operation has completed. +func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. 
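+//
+// For illustration, a response carrying "Retry-After: 10" yields
+// (10*time.Second, true), since the header value is parsed as a number of seconds.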
+func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. +func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + cancelCtx := ctx + // if the provided context already has a deadline don't override it + _, hasDeadline := ctx.Deadline() + if d := client.PollingDuration; !hasDeadline && d != 0 { + var cancel context.CancelFunc + cancelCtx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + + done, err := f.DoneWithContext(ctx, client) + for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") + } + } + return +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(f.pt) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
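+//
+// Together with MarshalJSON above, this lets an in-flight Future be persisted
+// and resumed later, e.g. (a sketch):
+//
+//	b, _ := json.Marshal(future) // save the polling state
+//	var resumed azure.Future
+//	_ = json.Unmarshal(b, &resumed) // restore it elsewhere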
+func (f *Future) UnmarshalJSON(data []byte) error {
+	// unmarshal into JSON object to determine the tracker type
+	obj := map[string]interface{}{}
+	err := json.Unmarshal(data, &obj)
+	if err != nil {
+		return err
+	}
+	if obj["method"] == nil {
+		return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
+	}
+	method := obj["method"].(string)
+	switch strings.ToUpper(method) {
+	case http.MethodDelete:
+		f.pt = &pollingTrackerDelete{}
+	case http.MethodPatch:
+		f.pt = &pollingTrackerPatch{}
+	case http.MethodPost:
+		f.pt = &pollingTrackerPost{}
+	case http.MethodPut:
+		f.pt = &pollingTrackerPut{}
+	default:
+		return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method)
+	}
+	// now unmarshal into the tracker
+	return json.Unmarshal(data, &f.pt)
+}
+
+// PollingURL returns the URL used for retrieving the status of the long-running operation.
+func (f Future) PollingURL() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingURL()
+}
+
+// GetResult should be called once polling has completed successfully.
+// It makes the final GET call to retrieve the resultant payload.
+func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
+	if f.pt.finalGetURL() == "" {
+		// we can end up in this situation if the async operation returns a 200
+		// with no polling URLs. in that case return the response which should
+		// contain the JSON payload (only do this for successful terminal cases).
+		if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() {
+			return lr, nil
+		}
+		return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result")
+	}
+	req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil)
+	if err != nil {
+		return nil, err
+	}
+	return sender.Do(req)
+}
+
+type pollingTracker interface {
+	// these methods can differ per tracker
+
+	// checks the response headers and status code to determine the polling mechanism
+	updatePollingMethod() error
+
+	// checks the response for tracker-specific error conditions
+	checkForErrors() error
+
+	// returns true if provisioning state should be checked
+	provisioningStateApplicable() bool
+
+	// methods common to all trackers
+
+	// initializes a tracker's polling URL and method, called for each iteration.
+	// these values can be overridden by each polling tracker as required.
+ initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! 
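+	// in summary: 200 -> provisioning state (or Succeeded when absent),
+	// 201 -> provisioning state (or InProgress), 202 -> InProgress,
+	// 204 -> Succeeded, anything else -> Failed.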
+ pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. 
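+// The expected error payload is the ARM wrapper shape unmarshalled below, i.e.
+//
+//	{ "error": { "code": "...", "message": "..." } }
+//
+// with a fallback to an unwrapped ServiceError object when that shape is absent.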
+func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + 
return nil +} + +// default initialization of polling URL/method. each verb tracker will update this as required. +func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + +// DELETE + +type pollingTrackerDelete struct { + pollingTrackerBase +} + +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absence of the "final GET" mechanism for PATCH + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs 
in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPut) checkForErrors() error { + err := pt.baseCheckForErrors() + if err != nil { + return err + } + // if there are no LRO headers then the body cannot be empty + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } + lh, err := getURLFromLocationHeader(pt.resp) + if err != nil { + return err + } + if ao == "" && lh == "" && len(pt.rawBody) == 0 { + return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") + } + return nil +} + +func (pt pollingTrackerPut) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// creates a polling tracker based on the verb of the original request +func createPollingTracker(resp *http.Response) (pollingTracker, error) { + var pt pollingTracker + switch strings.ToUpper(resp.Request.Method) { + case http.MethodDelete: + pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPatch: + pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPost: + pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPut: + pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} + default: + return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) + } + if err := pt.initializeState(); err != nil { + return pt, err + } + // this initializes the polling header values, we do this during creation in case the + // initial response send us invalid values; this way the API call will return a non-nil + // error (not doing this means the error shows up in Future.Done) + return pt, pt.updatePollingMethod() +} + +// gets the polling URL from the Azure-AsyncOperation header. +// ensures the URL is well-formed and absolute. +func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// gets the polling URL from the Location header. +// ensures the URL is well-formed and absolute. +func getURLFromLocationHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// verify that the URL is valid and absolute +func isValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// PollingMethodType defines a type used for enumerating polling mechanisms. +type PollingMethodType string + +const ( + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. 
+ PollingLocation PollingMethodType = "Location" + + // PollingRequestURI indicates the polling method uses the original request URI. + PollingRequestURI PollingMethodType = "RequestURI" + + // PollingUnknown indicates an unknown polling method and is the default value. + PollingUnknown PollingMethodType = "" +) + +// AsyncOpIncompleteError is the type that's returned from a future that has not completed. +type AsyncOpIncompleteError struct { + // FutureType is the name of the type composed of a azure.Future. + FutureType string +} + +// Error returns an error message including the originating type name of the error. +func (e AsyncOpIncompleteError) Error() string { + return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) +} + +// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. +func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { + return AsyncOpIncompleteError{ + FutureType: futureType, + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go new file mode 100644 index 00000000000..5f02026b391 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -0,0 +1,737 @@ +package auth + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "unicode/utf16" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/cli" + "github.com/dimchansky/utfbom" + "golang.org/x/crypto/pkcs12" +) + +// The possible keys in the Values map. 
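+// The AZURE_* keys below name the environment variables read by GetSettingsFromEnvironment;
+// the endpoint-style keys at the end are populated only when settings are loaded from an auth file.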
+const ( + SubscriptionID = "AZURE_SUBSCRIPTION_ID" + TenantID = "AZURE_TENANT_ID" + AuxiliaryTenantIDs = "AZURE_AUXILIARY_TENANT_IDS" + ClientID = "AZURE_CLIENT_ID" + ClientSecret = "AZURE_CLIENT_SECRET" + CertificatePath = "AZURE_CERTIFICATE_PATH" + CertificatePassword = "AZURE_CERTIFICATE_PASSWORD" + Username = "AZURE_USERNAME" + Password = "AZURE_PASSWORD" + EnvironmentName = "AZURE_ENVIRONMENT" + Resource = "AZURE_AD_RESOURCE" + ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint" + ResourceManagerEndpoint = "ResourceManagerEndpoint" + GraphResourceID = "GraphResourceID" + SQLManagementEndpoint = "SQLManagementEndpoint" + GalleryEndpoint = "GalleryEndpoint" + ManagementEndpoint = "ManagementEndpoint" +) + +// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + return settings.GetAuthorizer() +} + +// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + settings.Values[Resource] = resource + return settings.GetAuthorizer() +} + +// EnvironmentSettings contains the available authentication settings. +type EnvironmentSettings struct { + Values map[string]string + Environment azure.Environment +} + +// GetSettingsFromEnvironment returns the available authentication settings from the environment. +func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) { + s = EnvironmentSettings{ + Values: map[string]string{}, + } + s.setValue(SubscriptionID) + s.setValue(TenantID) + s.setValue(AuxiliaryTenantIDs) + s.setValue(ClientID) + s.setValue(ClientSecret) + s.setValue(CertificatePath) + s.setValue(CertificatePassword) + s.setValue(Username) + s.setValue(Password) + s.setValue(EnvironmentName) + s.setValue(Resource) + if v := s.Values[EnvironmentName]; v == "" { + s.Environment = azure.PublicCloud + } else { + s.Environment, err = azure.EnvironmentFromName(v) + } + if s.Values[Resource] == "" { + s.Values[Resource] = s.Environment.ResourceManagerEndpoint + } + return +} + +// GetSubscriptionID returns the available subscription ID or an empty string. +func (settings EnvironmentSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified environment variable value to the Values map if it exists +func (settings EnvironmentSettings) setValue(key string) { + if v := os.Getenv(key); v != "" { + settings.Values[key] = v + } +} + +// helper to return client and tenant IDs +func (settings EnvironmentSettings) getClientAndTenant() (string, string) { + clientID := settings.Values[ClientID] + tenantID := settings.Values[TenantID] + return clientID, tenantID +} + +// GetClientCredentials creates a config object from the available client credentials. +// An error is returned if no client credentials are available. 
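+// The client secret is taken from the ClientSecret (AZURE_CLIENT_SECRET) value; auxiliary
+// tenant IDs, if present, are parsed from a semicolon-separated list.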
+func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) { + secret := settings.Values[ClientSecret] + if secret == "" { + return ClientCredentialsConfig{}, errors.New("missing client secret") + } + clientID, tenantID := settings.getClientAndTenant() + config := NewClientCredentialsConfig(clientID, secret, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok { + config.AuxTenants = strings.Split(auxTenants, ";") + for i := range config.AuxTenants { + config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i]) + } + } + return config, nil +} + +// GetClientCertificate creates a config object from the available certificate credentials. +// An error is returned if no certificate credentials are available. +func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) { + certPath := settings.Values[CertificatePath] + if certPath == "" { + return ClientCertificateConfig{}, errors.New("missing certificate path") + } + certPwd := settings.Values[CertificatePassword] + clientID, tenantID := settings.getClientAndTenant() + config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config, nil +} + +// GetUsernamePassword creates a config object from the available username/password credentials. +// An error is returned if no username/password credentials are available. +func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) { + username := settings.Values[Username] + password := settings.Values[Password] + if username == "" || password == "" { + return UsernamePasswordConfig{}, errors.New("missing username/password") + } + clientID, tenantID := settings.getClientAndTenant() + config := NewUsernamePasswordConfig(username, password, clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config, nil +} + +// GetMSI creates a MSI config object from the available client ID. +func (settings EnvironmentSettings) GetMSI() MSIConfig { + config := NewMSIConfig() + config.Resource = settings.Values[Resource] + config.ClientID = settings.Values[ClientID] + return config +} + +// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs. +func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig { + clientID, tenantID := settings.getClientAndTenant() + config := NewDeviceFlowConfig(clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config +} + +// GetAuthorizer creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) { + //1.Client Credentials + if c, e := settings.GetClientCredentials(); e == nil { + return c.Authorizer() + } + + //2. Client Certificate + if c, e := settings.GetClientCertificate(); e == nil { + return c.Authorizer() + } + + //3. Username Password + if c, e := settings.GetUsernamePassword(); e == nil { + return c.Authorizer() + } + + // 4. 
MSI + return settings.GetMSI().Authorizer() +} + +// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order. +// 1. Client credentials +// 2. Client certificate +func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) { + settings, err := GetSettingsFromFile() + if err != nil { + return nil, err + } + if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil { + return a, err + } + if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil { + return a, err + } + return nil, errors.New("auth file missing client and certificate credentials") +} + +// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order. +// 1. Client credentials +// 2. Client certificate +func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) { + s, err := GetSettingsFromFile() + if err != nil { + return nil, err + } + if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil { + return a, err + } + if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil { + return a, err + } + return nil, errors.New("auth file missing client and certificate credentials") +} + +// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLI() (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + + if settings.Values[Resource] == "" { + settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint + } + + return NewAuthorizerFromCLIWithResource(settings.Values[Resource]) +} + +// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) { + token, err := cli.GetTokenFromCLI(resource) + if err != nil { + return nil, err + } + + adalToken, err := token.ToADALToken() + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(&adalToken), nil +} + +// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file. 
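+// The file path is read from the AZURE_AUTH_LOCATION environment variable, and the
+// file contents may be UTF-8 or UTF-16 encoded JSON (see decode below).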
+func GetSettingsFromFile() (FileSettings, error) { + s := FileSettings{} + fileLocation := os.Getenv("AZURE_AUTH_LOCATION") + if fileLocation == "" { + return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") + } + + contents, err := ioutil.ReadFile(fileLocation) + if err != nil { + return s, err + } + + // Auth file might be encoded + decoded, err := decode(contents) + if err != nil { + return s, err + } + + authFile := map[string]interface{}{} + err = json.Unmarshal(decoded, &authFile) + if err != nil { + return s, err + } + + s.Values = map[string]string{} + s.setKeyValue(ClientID, authFile["clientId"]) + s.setKeyValue(ClientSecret, authFile["clientSecret"]) + s.setKeyValue(CertificatePath, authFile["clientCertificate"]) + s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"]) + s.setKeyValue(SubscriptionID, authFile["subscriptionId"]) + s.setKeyValue(TenantID, authFile["tenantId"]) + s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"]) + s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"]) + s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"]) + s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"]) + s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"]) + s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"]) + return s, nil +} + +// FileSettings contains the available authentication settings. +type FileSettings struct { + Values map[string]string +} + +// GetSubscriptionID returns the available subscription ID or an empty string. +func (settings FileSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified value to the Values map if it isn't nil +func (settings FileSettings) setKeyValue(key string, val interface{}) { + if val != nil { + settings.Values[key] = val.(string) + } +} + +// returns the specified AAD endpoint or the public cloud endpoint if unspecified +func (settings FileSettings) getAADEndpoint() string { + if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok { + return v + } + return azure.PublicCloud.ActiveDirectoryEndpoint +} + +// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) +} + +// ClientCredentialsAuthorizer creates an authorizer from the available client credentials. +func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ClientCredentialsAuthorizerWithResource(resource) +} + +// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken +// from the available client credentials and the specified resource. 
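+// An error is returned if the settings contain no client secret.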
+func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) { + if _, ok := settings.Values[ClientSecret]; !ok { + return nil, errors.New("missing client secret") + } + config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID]) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource) +} + +func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) { + if _, ok := settings.Values[CertificatePath]; !ok { + return ClientCertificateConfig{}, errors.New("missing certificate path") + } + cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID]) + cfg.AADEndpoint = settings.getAADEndpoint() + cfg.Resource = resource + return cfg, nil +} + +// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource. +func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) { + spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) + if err != nil { + return nil, err + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource) +} + +// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials. +func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ClientCertificateAuthorizerWithResource(resource) +} + +// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) { + cfg, err := settings.clientCertificateConfigWithResource(resource) + if err != nil { + return nil, err + } + return cfg.ServicePrincipalToken() +} + +// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource. 
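+// An error is returned if the settings contain no certificate path.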
+func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
+	cfg, err := settings.clientCertificateConfigWithResource(resource)
+	if err != nil {
+		return nil, err
+	}
+	return cfg.Authorizer()
+}
+
+func decode(b []byte) ([]byte, error) {
+	reader, enc := utfbom.Skip(bytes.NewReader(b))
+
+	switch enc {
+	case utfbom.UTF16LittleEndian:
+		u16 := make([]uint16, (len(b)/2)-1)
+		err := binary.Read(reader, binary.LittleEndian, &u16)
+		if err != nil {
+			return nil, err
+		}
+		return []byte(string(utf16.Decode(u16))), nil
+	case utfbom.UTF16BigEndian:
+		u16 := make([]uint16, (len(b)/2)-1)
+		err := binary.Read(reader, binary.BigEndian, &u16)
+		if err != nil {
+			return nil, err
+		}
+		return []byte(string(utf16.Decode(u16))), nil
+	}
+	return ioutil.ReadAll(reader)
+}
+
+func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
+	// Compare the default base URI from the SDK to the endpoints from the public cloud.
+	// Base URI and token resource are the same string. This func finds the authentication
+	// file field that matches the SDK base URI. The SDK defines the public cloud
+	// endpoint as its default base URI.
+	if !strings.HasSuffix(baseURI, "/") {
+		baseURI += "/"
+	}
+	switch baseURI {
+	case azure.PublicCloud.ServiceManagementEndpoint:
+		return settings.Values[ManagementEndpoint], nil
+	case azure.PublicCloud.ResourceManagerEndpoint:
+		return settings.Values[ResourceManagerEndpoint], nil
+	case azure.PublicCloud.ActiveDirectoryEndpoint:
+		return settings.Values[ActiveDirectoryEndpoint], nil
+	case azure.PublicCloud.GalleryEndpoint:
+		return settings.Values[GalleryEndpoint], nil
+	case azure.PublicCloud.GraphEndpoint:
+		return settings.Values[GraphResourceID], nil
+	}
+	return "", fmt.Errorf("auth: base URI not found in endpoints")
+}
+
+// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials.
+// Defaults to Public Cloud and Resource Manager Endpoint.
+func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig {
+	return ClientCredentialsConfig{
+		ClientID:     clientID,
+		ClientSecret: clientSecret,
+		TenantID:     tenantID,
+		Resource:     azure.PublicCloud.ResourceManagerEndpoint,
+		AADEndpoint:  azure.PublicCloud.ActiveDirectoryEndpoint,
+	}
+}
+
+// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through a client certificate.
+// Defaults to Public Cloud and Resource Manager Endpoint.
+func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig {
+	return ClientCertificateConfig{
+		CertificatePath:     certificatePath,
+		CertificatePassword: certificatePassword,
+		ClientID:            clientID,
+		TenantID:            tenantID,
+		Resource:            azure.PublicCloud.ResourceManagerEndpoint,
+		AADEndpoint:         azure.PublicCloud.ActiveDirectoryEndpoint,
+	}
+}
+
+// NewUsernamePasswordConfig creates a UsernamePasswordConfig object configured to obtain an Authorizer through username and password.
+// Defaults to Public Cloud and Resource Manager Endpoint.
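+// A minimal usage sketch (illustrative, with hypothetical placeholder values):
+//
+//	cfg := NewUsernamePasswordConfig("user@example.com", "password", "client-id", "tenant-id")
+//	authorizer, err := cfg.Authorizer()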
+func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig { + return UsernamePasswordConfig{ + Username: username, + Password: password, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI. +func NewMSIConfig() MSIConfig { + return MSIConfig{ + Resource: azure.PublicCloud.ResourceManagerEndpoint, + } +} + +// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig { + return DeviceFlowConfig{ + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +//AuthorizerConfig provides an authorizer from the configuration provided. +type AuthorizerConfig interface { + Authorizer() (autorest.Authorizer, error) +} + +// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials. +type ClientCredentialsConfig struct { + ClientID string + ClientSecret string + TenantID string + AuxTenants []string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from client credentials. +func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) +} + +// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials. +func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) { + oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{}) + if err != nil { + return nil, err + } + return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) +} + +// Authorizer gets the authorizer from client credentials. +func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { + if len(ccc.AuxTenants) == 0 { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil + } + mtSPT, err := ccc.MultiTenantServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err) + } + return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil +} + +// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. +type ClientCertificateConfig struct { + ClientID string + CertificatePath string + CertificatePassword string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from client certificate. 
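+// The certificate file is read from CertificatePath and decoded as PKCS#12 using
+// CertificatePassword; the embedded private key must be RSA (see decodePkcs12 below).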
+func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + certData, err := ioutil.ReadFile(ccc.CertificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) + } + certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword) + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) +} + +// Authorizer gets an authorizer object from client certificate. +func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication. +type DeviceFlowConfig struct { + ClientID string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from device flow. +func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := dfc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalToken gets the service principal token from device flow. +func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID) + if err != nil { + return nil, err + } + oauthClient := &autorest.Client{} + deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + log.Println(*deviceCode.Message) + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token) +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password. +type UsernamePasswordConfig struct { + ClientID string + Username string + Password string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from username and password. 
+func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource) +} + +// Authorizer gets the authorizer from a username and a password. +func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ups.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// MSIConfig provides the options to get a bearer authorizer through MSI. +type MSIConfig struct { + Resource string + ClientID string +} + +// Authorizer gets the authorizer from MSI. +func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { + msiEndpoint, err := adal.GetMSIEndpoint() + if err != nil { + return nil, err + } + + var spToken *adal.ServicePrincipalToken + if mc.ClientID == "" { + spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) + } + } else { + spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err) + } + } + + return autorest.NewBearerAuthorizer(spToken), nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod new file mode 100644 index 00000000000..e98dfe8623a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/azure/auth + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.2 + github.com/Azure/go-autorest/autorest/adal v0.8.0 + github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 + github.com/dimchansky/utfbom v1.1.0 + golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum new file mode 100644 index 00000000000..d706fd5836b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum @@ -0,0 +1,36 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 
h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go new file mode 100644 index 00000000000..2f09cd177aa --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package auth + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest/autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
new file mode 100644
index 00000000000..26be936b7e5
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -0,0 +1,335 @@
+// Package azure provides Azure-specific implementations used with AutoRest.
+// See the included examples for more detail.
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// HeaderClientID is the Azure extension header to set a user-specified request ID.
+	HeaderClientID = "x-ms-client-request-id"
+
+	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
+	// should be included in the response.
+	HeaderReturnClientID = "x-ms-return-client-request-id"
+
+	// HeaderRequestID is the Azure extension header of the service generated request ID returned
+	// in the response.
+	HeaderRequestID = "x-ms-request-id"
+)
+
+// ServiceError encapsulates the error response from an Azure service.
+// It adheres to the OData v4 specification for error responses.
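+// A typical (hypothetical) error payload unmarshaled into this type:
+//
+//	{"code": "ResourceNotFound", "message": "The resource was not found."}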
+type ServiceError struct {
+	Code           string                   `json:"code"`
+	Message        string                   `json:"message"`
+	Target         *string                  `json:"target"`
+	Details        []map[string]interface{} `json:"details"`
+	InnerError     map[string]interface{}   `json:"innererror"`
+	AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+}
+
+func (se ServiceError) Error() string {
+	result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+
+	if se.Target != nil {
+		result += fmt.Sprintf(" Target=%q", *se.Target)
+	}
+
+	if se.Details != nil {
+		d, err := json.Marshal(se.Details)
+		if err != nil {
+			result += fmt.Sprintf(" Details=%v", se.Details)
+		} else {
+			result += fmt.Sprintf(" Details=%v", string(d))
+		}
+	}
+
+	if se.InnerError != nil {
+		d, err := json.Marshal(se.InnerError)
+		if err != nil {
+			result += fmt.Sprintf(" InnerError=%v", se.InnerError)
+		} else {
+			result += fmt.Sprintf(" InnerError=%v", string(d))
+		}
+	}
+
+	if se.AdditionalInfo != nil {
+		d, err := json.Marshal(se.AdditionalInfo)
+		if err != nil {
+			result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
+		} else {
+			result += fmt.Sprintf(" AdditionalInfo=%v", string(d))
+		}
+	}
+
+	return result
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
+func (se *ServiceError) UnmarshalJSON(b []byte) error {
+	// per the OData v4 spec the details field must be an array of JSON objects.
+	// unfortunately not all services adhere to the spec and just return a single
+	// object instead of an array with one object. so we have to perform some
+	// shenanigans to accommodate both cases.
+	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
+
+	type serviceError1 struct {
+		Code           string                   `json:"code"`
+		Message        string                   `json:"message"`
+		Target         *string                  `json:"target"`
+		Details        []map[string]interface{} `json:"details"`
+		InnerError     map[string]interface{}   `json:"innererror"`
+		AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+	}
+
+	type serviceError2 struct {
+		Code           string                   `json:"code"`
+		Message        string                   `json:"message"`
+		Target         *string                  `json:"target"`
+		Details        map[string]interface{}   `json:"details"`
+		InnerError     map[string]interface{}   `json:"innererror"`
+		AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+	}
+
+	se1 := serviceError1{}
+	err := json.Unmarshal(b, &se1)
+	if err == nil {
+		se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo)
+		return nil
+	}
+
+	se2 := serviceError2{}
+	err = json.Unmarshal(b, &se2)
+	if err == nil {
+		se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo)
+		se.Details = append(se.Details, se2.Details)
+		return nil
+	}
+	return err
+}
+
+func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) {
+	se.Code = code
+	se.Message = message
+	se.Target = target
+	se.Details = details
+	se.InnerError = inner
+	se.AdditionalInfo = additional
+}
+
+// RequestError describes an error response returned by an Azure service.
+type RequestError struct {
+	autorest.DetailedError
+
+	// The error returned by the Azure service.
+	ServiceError *ServiceError `json:"error" xml:"Error"`
+
+	// The request id (from the x-ms-request-id header) of the request.
+	RequestID string
+}
+
+// Error returns a human-friendly error message from the service error.
+func (e RequestError) Error() string {
+	return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
+		e.StatusCode, e.ServiceError)
+}
+
+// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
+func IsAzureError(e error) bool {
+	_, ok := e.(*RequestError)
+	return ok
+}
+
+// Resource contains details about an Azure resource.
+type Resource struct {
+	SubscriptionID string
+	ResourceGroup  string
+	Provider       string
+	ResourceType   string
+	ResourceName   string
+}
+
+// ParseResourceID parses a resource ID into a Resource struct.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
+func ParseResourceID(resourceID string) (Resource, error) {
+
+	const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`
+	resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
+	match := resourceIDPattern.FindStringSubmatch(resourceID)
+
+	if len(match) == 0 {
+		return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
+	}
+
+	v := strings.Split(match[5], "/")
+	resourceName := v[len(v)-1]
+
+	result := Resource{
+		SubscriptionID: match[1],
+		ResourceGroup:  match[2],
+		Provider:       match[3],
+		ResourceType:   match[4],
+		ResourceName:   resourceName,
+	}
+
+	return result, nil
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
+	if v, ok := original.(*RequestError); ok {
+		return *v
+	}
+
+	statusCode := autorest.UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+	return RequestError{
+		DetailedError: autorest.DetailedError{
+			Original:    original,
+			PackageType: packageType,
+			Method:      method,
+			StatusCode:  statusCode,
+			Message:     fmt.Sprintf(message, args...),
+		},
+	}
+}
+
+// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
+// header to true such that the UUID accompanies the http.Response.
+func WithReturningClientID(uuid string) autorest.PrepareDecorator {
+	preparer := autorest.CreatePreparer(
+		WithClientID(uuid),
+		WithReturnClientID(true))
+
+	return func(p autorest.Preparer) autorest.Preparer {
+		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			return preparer.Prepare(r)
+		})
+	}
+}
+
+// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
+func WithClientID(uuid string) autorest.PrepareDecorator {
+	return autorest.WithHeader(HeaderClientID, uuid)
+}
+
+// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-return-client-request-id whose boolean value indicates if the value of the
+// x-ms-client-request-id header should be included in the http.Response.
+func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + encodedAs := autorest.EncodedAsJSON + if strings.Contains(resp.Header.Get("Content-Type"), "xml") { + encodedAs = autorest.EncodedAsXML + } + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&e.ServiceError); err != nil { + return err + } + } + if e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body in hopes of getting something. + rawBody := map[string]interface{}{} + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&rawBody); err != nil { + return err + } + + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
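Editor's note: the following is a minimal, illustrative sketch and is not part of this change. It shows one plausible way to exercise `ParseResourceID` and `IsAzureError` from the vendored `azure` package above; the resource ID is made up.

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// A made-up resource ID in the documented format.
	id := "/subscriptions/00000000-0000-0000-0000-000000000000" +
		"/resourceGroups/rg1/providers/Microsoft.Network/dnszones/example.org"

	res, err := azure.ParseResourceID(id)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// Prints: Microsoft.Network dnszones example.org
	fmt.Println(res.Provider, res.ResourceType, res.ResourceName)

	// IsAzureError reports whether an error is an *azure.RequestError.
	fmt.Println(azure.IsAzureError(err)) // false: err is nil here
}
```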
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod new file mode 100644 index 00000000000..a58302914dc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/azure/cli + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.0 + github.com/Azure/go-autorest/autorest/adal v0.8.0 + github.com/Azure/go-autorest/autorest/date v0.2.0 + github.com/dimchansky/utfbom v1.1.0 + github.com/mitchellh/go-homedir v1.1.0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum new file mode 100644 index 00000000000..542806b9471 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum @@ -0,0 +1,29 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod 
h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go new file mode 100644 index 00000000000..618bed392fc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 00000000000..f45c3a516d9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,83 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/dimchansky/utfbom" + "github.com/mitchellh/go-homedir" +) + +// Profile represents a Profile from the Azure CLI +type Profile struct { + InstallationID string `json:"installationId"` + Subscriptions []Subscription `json:"subscriptions"` +} + +// Subscription represents a Subscription from the Azure CLI +type Subscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + IsDefault bool `json:"isDefault"` + Name string `json:"name"` + State string `json:"state"` + TenantID string `json:"tenantId"` + User *User `json:"user"` +} + +// User represents a User from the Azure CLI +type User struct { + Name string `json:"name"` + Type string `json:"type"` +} + +const azureProfileJSON = "azureProfile.json" + +func configDir() string { + return os.Getenv("AZURE_CONFIG_DIR") +} + +// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI +func ProfilePath() (string, error) { + if cfgDir := configDir(); cfgDir != "" { + return filepath.Join(cfgDir, azureProfileJSON), nil + } + return homedir.Expand("~/.azure/" + azureProfileJSON) +} + +// LoadProfile restores a Profile object from a file located at 'path'. +func LoadProfile(path string) (result Profile, err error) { + var contents []byte + contents, err = ioutil.ReadFile(path) + if err != nil { + err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + return + } + reader := utfbom.SkipOnly(bytes.NewReader(contents)) + + dec := json.NewDecoder(reader) + if err = dec.Decode(&result); err != nil { + err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) + return + } + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go new file mode 100644 index 00000000000..44ff446f669 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -0,0 +1,175 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strconv"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/mitchellh/go-homedir"
+)
+
+// Token represents an AccessToken from the Azure CLI
+type Token struct {
+	AccessToken      string `json:"accessToken"`
+	Authority        string `json:"_authority"`
+	ClientID         string `json:"_clientId"`
+	ExpiresOn        string `json:"expiresOn"`
+	IdentityProvider string `json:"identityProvider"`
+	IsMRRT           bool   `json:"isMRRT"`
+	RefreshToken     string `json:"refreshToken"`
+	Resource         string `json:"resource"`
+	TokenType        string `json:"tokenType"`
+	UserID           string `json:"userId"`
+}
+
+const accessTokensJSON = "accessTokens.json"
+
+// ToADALToken converts an Azure CLI `Token` to an `adal.Token`
+func (t Token) ToADALToken() (converted adal.Token, err error) {
+	tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn)
+	if err != nil {
+		err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err)
+		return
+	}
+
+	difference := tokenExpirationDate.Sub(date.UnixEpoch())
+
+	converted = adal.Token{
+		AccessToken:  t.AccessToken,
+		Type:         t.TokenType,
+		ExpiresIn:    "3600",
+		ExpiresOn:    json.Number(strconv.Itoa(int(difference.Seconds()))),
+		RefreshToken: t.RefreshToken,
+		Resource:     t.Resource,
+	}
+	return
+}
+
+// AccessTokensPath returns the path where access tokens are stored from the Azure CLI
+// TODO(#199): add unit test.
+func AccessTokensPath() (string, error) {
+	// Azure-CLI allows the user to customize the path of access tokens through an environment variable.
+	if accessTokenPath := os.Getenv("AZURE_ACCESS_TOKEN_FILE"); accessTokenPath != "" {
+		return accessTokenPath, nil
+	}
+
+	// Azure-CLI allows the user to customize the path to the Azure config directory through an environment variable.
+	if cfgDir := configDir(); cfgDir != "" {
+		return filepath.Join(cfgDir, accessTokensJSON), nil
+	}
+
+	// Fallback logic to the default path in a non-cloud-shell environment.
+	// TODO(#200): remove the dependency on hard-coding path.
+	return homedir.Expand("~/.azure/" + accessTokensJSON)
+}
+
+// ParseExpirationDate parses either an Azure CLI or CloudShell date into a time object
+func ParseExpirationDate(input string) (*time.Time, error) {
+	// CloudShell (and potentially the Azure CLI in future)
+	expirationDate, cloudShellErr := time.Parse(time.RFC3339, input)
+	if cloudShellErr != nil {
+		// Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone)
+		const cliFormat = "2006-01-02 15:04:05.999999"
+		expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local)
+		if cliErr == nil {
+			return &expirationDate, nil
+		}
+
+		return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr)
+	}
+
+	return &expirationDate, nil
+}
+
+// LoadTokens restores a set of Token objects from a file located at 'path'.
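+//
+// Editor's note, a hypothetical sketch: the path is usually obtained from
+// AccessTokensPath, e.g.
+//
+//	if path, err := AccessTokensPath(); err == nil {
+//		tokens, _ := LoadTokens(path)
+//		_ = tokens
+//	}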
+func LoadTokens(path string) ([]Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var tokens []Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&tokens); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) + } + + return tokens, nil +} + +// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios. +func GetTokenFromCLI(resource string) (*Token, error) { + // This is the path that a developer can set to tell this class what the install path for Azure CLI is. + const azureCLIPath = "AzureCLIPath" + + // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. + azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) + + // Default path for non-Windows. + const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" + + // Validate resource, since it gets sent as a command line argument to Azure CLI + const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(invalidResourceErrorTemplate, resource) + } + + // Execute Azure CLI to get token + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir"))) + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows)) + cliCmd.Args = append(cliCmd.Args, "/c", "az") + } else { + cliCmd = exec.Command("az") + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) + } + cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource) + + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) + } + + tokenResponse := Token{} + err = json.Unmarshal(output, &tokenResponse) + if err != nil { + return nil, err + } + + return &tokenResponse, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 00000000000..6c20b8179ab --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,244 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// ResourceIdentifier contains a set of Azure resource IDs. +type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + 
ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", + TokenAudience: "https://management.azure.com/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", + TokenAudience: "https://management.usgovcloudapi.net/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + Storage: "https://storage.azure.com/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: 
"https://storage.azure.com/", + }, + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: NotAvailable, + CosmosDBDNSSuffix: "documents.microsoftazure.de", + TokenAudience: "https://management.microsoftazure.de/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. 
+func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 00000000000..507f9e95cf1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +type audience []string + +type authentication struct { + LoginEndpoint string `json:"loginEndpoint"` + Audiences audience `json:"audiences"` +} + +type environmentMetadataInfo struct { + GalleryEndpoint string `json:"galleryEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + PortalEndpoint string `json:"portalEndpoint"` + Authentication authentication `json:"authentication"` +} + +// EnvironmentProperty represent property names that clients can override +type EnvironmentProperty string + +const ( + // EnvironmentName ... + EnvironmentName EnvironmentProperty = "name" + // EnvironmentManagementPortalURL .. + EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" + // EnvironmentPublishSettingsURL ... + EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" + // EnvironmentServiceManagementEndpoint ... + EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" + // EnvironmentResourceManagerEndpoint ... + EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" + // EnvironmentActiveDirectoryEndpoint ... + EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" + // EnvironmentGalleryEndpoint ... + EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" + // EnvironmentKeyVaultEndpoint ... + EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" + // EnvironmentGraphEndpoint ... + EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" + // EnvironmentServiceBusEndpoint ... + EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" + // EnvironmentBatchManagementEndpoint ... + EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" + // EnvironmentStorageEndpointSuffix ... + EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" + // EnvironmentSQLDatabaseDNSSuffix ... + EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" + // EnvironmentTrafficManagerDNSSuffix ... + EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... 
+ EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... + EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... + EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. +func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + 
environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 00000000000..c6d39f68665 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,204 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+// It also handles request retries.
+func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
+	return func(s autorest.Sender) autorest.Sender {
+		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := autorest.NewRetriableRequest(r)
+			for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+
+				resp, err = autorest.SendWithSender(s, rr.Request(),
+					autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+				)
+				if err != nil {
+					return resp, err
+				}
+
+				if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
+					return resp, err
+				}
+
+				var re RequestError
+				if strings.Contains(r.Header.Get("Content-Type"), "xml") {
+					// XML errors (e.g. Storage Data Plane) only return the inner object
+					err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError))
+				} else {
+					err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re))
+				}
+
+				if err != nil {
+					return resp, err
+				}
+				err = re
+
+				if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
+					regErr := register(client, r, re)
+					if regErr != nil {
+						return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
+					}
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+func getProvider(re RequestError) (string, error) {
+	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
+		return re.ServiceError.Details[0]["target"].(string), nil
+	}
+	return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+	subID := getSubscription(originalReq.URL.Path)
+	if subID == "" {
+		return errors.New("missing parameter subscriptionID to register resource provider")
+	}
+	providerName, err := getProvider(re)
+	if err != nil {
+		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+	}
+	newURL := url.URL{
+		Scheme: originalReq.URL.Scheme,
+		Host:   originalReq.URL.Host,
+	}
+
+	// taken from the resources SDK
+	// with almost identical code, these sections are easier to maintain
+	// It is also not a good idea to import the SDK here
+	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": autorest.Encode("path", providerName),
+		"subscriptionId":            autorest.Encode("path", subID),
+	}
+
+	const APIVersion = "2016-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(newURL.String()),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+		autorest.WithQueryParameters(queryParameters),
+	)
+
+	req, err := preparer.Prepare(&http.Request{})
+	if err != nil {
+		return err
+	}
+	req = req.WithContext(originalReq.Context())
+
+	resp, err := autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+	)
+	if err != nil {
+		return err
+	}
+
+	type Provider struct {
+		RegistrationState *string `json:"registrationState,omitempty"`
+	}
+	var provider Provider
+
+	err = autorest.Respond(
+		resp,
+
WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + // poll for registered provisioning state + registrationStartTime := time.Now() + for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() + } + } + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 00000000000..1c6a0617a1f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,300 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/logger" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). 
+	DefaultRetryAttempts = 3
+
+	// DefaultRetryDuration is the duration to wait between retries.
+	DefaultRetryDuration = 30 * time.Second
+)
+
+var (
+	// StatusCodesForRetry are a defined group of status codes for which the client will retry
+	StatusCodesForRetry = []int{
+		http.StatusRequestTimeout,      // 408
+		http.StatusTooManyRequests,     // 429
+		http.StatusInternalServerError, // 500
+		http.StatusBadGateway,          // 502
+		http.StatusServiceUnavailable,  // 503
+		http.StatusGatewayTimeout,      // 504
+	}
+)
+
+const (
+	requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+	responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// Response serves as the base for all responses from generated clients. It provides access to the
+// last http.Response.
+type Response struct {
+	*http.Response `json:"-"`
+}
+
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+	if r.Response == nil {
+		return false
+	}
+	return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+	return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			var body, b bytes.Buffer
+
+			defer r.Body.Close()
+
+			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+			if err := r.Write(&b); err != nil {
+				return nil, fmt.Errorf("Failed to write request: %v", err)
+			}
+
+			li.Logger.Printf(requestFormat, b.String())
+
+			r.Body = ioutil.NopCloser(&body)
+			return p.Prepare(r)
+		})
+	}
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
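+//
+// Editor's note, a hypothetical sketch wiring both inspectors into a Client
+// (os is not imported in this file; this is illustrative only):
+//
+//	li := LoggingInspector{Logger: log.New(os.Stderr, "", log.LstdFlags)}
+//	c := NewClientWithUserAgent("example")
+//	c.RequestInspector = li.WithInspection()
+//	c.ResponseInspector = li.ByInspecting()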
+func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration. + PollingDuration time.Duration + + // RetryAttempts sets the default number of retry attempts for client. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). + SkipResourceProviderRegistration bool +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + return newClient(ua, tls.RenegotiateNever) +} + +// ClientOptions contains various Client configuration options. +type ClientOptions struct { + // UserAgent is an optional user-agent string to append to the default user agent. + UserAgent string + + // Renegotiation is an optional setting to control client-side TLS renegotiation. + Renegotiation tls.RenegotiationSupport +} + +// NewClientWithOptions returns an instance of a Client with the specified values. 
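+//
+// Editor's note, a hypothetical sketch:
+//
+//	c := NewClientWithOptions(ClientOptions{
+//		UserAgent:     "myapp/1.0",
+//		Renegotiation: tls.RenegotiateOnceAsClient,
+//	})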
+func NewClientWithOptions(options ClientOptions) Client {
+	return newClient(options.UserAgent, options.Renegotiation)
+}
+
+func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
+	c := Client{
+		PollingDelay:    DefaultPollingDelay,
+		PollingDuration: DefaultPollingDuration,
+		RetryAttempts:   DefaultRetryAttempts,
+		RetryDuration:   DefaultRetryDuration,
+		UserAgent:       UserAgent(),
+	}
+	c.Sender = c.sender(renegotiation)
+	c.AddToUserAgent(ua)
+	return c
+}
+
+// AddToUserAgent adds an extension to the current user agent.
+func (c *Client) AddToUserAgent(extension string) error {
+	if extension != "" {
+		c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+		return nil
+	}
+	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, set the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+	if r.UserAgent() == "" {
+		r, _ = Prepare(r,
+			WithUserAgent(c.UserAgent))
+	}
+	// NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
+	r, err := Prepare(r,
+		c.WithAuthorization(),
+		c.WithInspection())
+	if err != nil {
+		var resp *http.Response
+		if detErr, ok := err.(DetailedError); ok {
+			// if the authorization failed (e.g. invalid credentials) there will
+			// be a response associated with the error, be sure to return it.
+			resp = detErr.Response
+		}
+		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+	}
+	logger.Instance.WriteRequest(r, logger.Filter{
+		Header: func(k string, v []string) (bool, []string) {
+			// remove the auth token from the log
+			if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
+				v = []string{"**REDACTED**"}
+			}
+			return true, v
+		},
+	})
+	resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
+	logger.Instance.WriteResponse(resp, logger.Filter{})
+	Respond(resp, c.ByInspecting())
+	return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender(renegotiation tls.RenegotiationSupport) Sender {
+	if c.Sender == nil {
+		return sender(renegotiation)
+	}
+	return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+	return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+	if c.Authorizer == nil {
+		return NullAuthorizer{}
+	}
+	return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+	if c.RequestInspector == nil {
+		return WithNothing()
+	}
+	return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
+func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
new file mode 100644
index 00000000000..c4571065685
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
@@ -0,0 +1,96 @@
+/*
+Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
+defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
+time.Time types. Both convert to time.Time through a ToTime method.
+*/
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	fullDate     = "2006-01-02"
+	fullDateJSON = `"2006-01-02"`
+	dateFormat   = "%04d-%02d-%02d"
+	jsonFormat   = `"%04d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+	time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
+func ParseDate(date string) (d Date, err error) {
+	return parseDate(date, fullDate)
+}
+
+func parseDate(date string, format string) (Date, error) {
+	d, err := time.Parse(format, date)
+	return Date{Time: d}, err
+}
+
+// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalBinary() ([]byte, error) {
+	return d.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalBinary(data []byte) error {
+	return d.UnmarshalText(data)
+}
+
+// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalJSON() (json []byte, err error) {
+	return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalJSON(data []byte) (err error) {
+	d.Time, err = time.Parse(fullDateJSON, string(data))
+	return err
+}
+
+// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalText() (text []byte, err error) {
+	return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
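+//
+// For example (the date value is illustrative):
+//
+//	var d Date
+//	err := d.UnmarshalText([]byte("2001-02-03")) // d now holds 2001-02-03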
+func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod new file mode 100644 index 00000000000..3adc4804c3d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/autorest/date + +go 1.12 + +require github.com/Azure/go-autorest/autorest v0.9.0 diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum new file mode 100644 index 00000000000..9e2ee7a9484 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -0,0 +1,16 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 00000000000..55adf930f4a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 00000000000..b453fad0491 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). 
+func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 00000000000..48fb39ba9b9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). 
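+//
+// For example (the timestamp is illustrative):
+//
+//	var t TimeRFC1123
+//	err := t.UnmarshalText([]byte("Mon, 02 Jan 2006 15:04:05 MST"))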
+func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) {
+	t.Time, err = ParseTime(rfc1123, string(data))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
+	return t.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
+// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
+func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
+	return t.UnmarshalText(data)
+}
+
+// ToTime returns a Time as a time.Time
+func (t TimeRFC1123) ToTime() time.Time {
+	return t.Time
+}
+
+// String returns the Time formatted as an RFC1123 date-time string (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) String() string {
+	// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
+	b, err := t.MarshalText()
+	if err != nil {
+		return ""
+	}
+	return string(b)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
new file mode 100644
index 00000000000..7073959b2a9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
@@ -0,0 +1,123 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"time"
+)
+
+// unixEpoch is the moment in time that should be treated as timestamp 0.
+var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// UnixTime marshals and unmarshals a time that is represented as the number
+// of seconds (ignoring leap seconds) since the Unix Epoch.
+type UnixTime time.Time
+
+// Duration returns the time as a Duration since the UnixEpoch.
+func (t UnixTime) Duration() time.Duration {
+	return time.Time(t).Sub(unixEpoch)
+}
+
+// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+	return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
+func UnixEpoch() time.Time {
+	return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements
+// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds).
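+//
+// A small sketch (the value is illustrative):
+//
+//	b, err := NewUnixTimeFromSeconds(1.5).MarshalJSON() // yields the JSON number 1.5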
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+	buffer := &bytes.Buffer{}
+	enc := json.NewEncoder(buffer)
+	err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number representing the number of seconds
+// since midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(text))
+
+	var secondsSinceEpoch float64
+	if err := dec.Decode(&secondsSinceEpoch); err != nil {
+		return err
+	}
+
+	*t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+	return nil
+}
+
+// MarshalText stores the UnixTime as RFC3339 text by delegating to time.Time.MarshalText.
+func (t UnixTime) MarshalText() ([]byte, error) {
+	cast := time.Time(t)
+	return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime from RFC3339 text by delegating to time.Time.UnmarshalText.
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+	var unmarshaled time.Time
+
+	if err := unmarshaled.UnmarshalText(raw); err != nil {
+		return err
+	}
+
+	*t = UnixTime(unmarshaled)
+	return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+	buf := &bytes.Buffer{}
+
+	payload := int64(t.Duration())
+
+	if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts a binary.LittleEndian int64 of nanoseconds since the epoch back into a
+// UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+	var nanosecondsSinceEpoch int64
+
+	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+		return err
+	}
+	*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
new file mode 100644
index 00000000000..12addf0ebb4
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -0,0 +1,25 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"strings"
+	"time"
+)
+
+// ParseTime parses a time string using the specified format.
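+//
+// A small sketch (the timestamp is illustrative); note that the input is
+// upper-cased before parsing, so lower-case RFC3339 markers are tolerated:
+//
+//	t, err := ParseTime(time.RFC3339, "2001-02-03t04:05:06z")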
+func ParseTime(format string, t string) (d time.Time, err error) {
+	return time.Parse(format, strings.ToUpper(t))
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go
new file mode 100644
index 00000000000..f724f33327e
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/error.go
@@ -0,0 +1,98 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+)
+
+const (
+	// UndefinedStatusCode is used when an HTTP status code is not available for an error.
+	UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+	Original error
+
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+	StatusCode interface{}
+
+	// Message is the error message.
+	Message string
+
+	// ServiceError is the response body of the failed API, in bytes.
+	ServiceError []byte
+
+	// Response is the response object that was returned during failure, if applicable.
+	Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, nil, message, args...)
+}
+
+// NewErrorWithResponse creates a new Error conforming object from the passed
+// packageType, method, statusCode of the given resp (UndefinedStatusCode if
+// resp is nil), and message. message is treated as a format string to which the
+// optional args apply.
+func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, resp, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
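+//
+// A minimal sketch (the wrapped error, package, method, and message below are
+// illustrative; passing a nil resp yields UndefinedStatusCode):
+//
+//	err := NewErrorWithError(io.EOF, "example", "DoSomething", nil, "reading %s failed", "the body")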
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	if v, ok := original.(DetailedError); ok {
+		return v
+	}
+
+	statusCode := UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+
+	return DetailedError{
+		Original:    original,
+		PackageType: packageType,
+		Method:      method,
+		StatusCode:  statusCode,
+		Message:     fmt.Sprintf(message, args...),
+		Response:    resp,
+	}
+}
+
+// Error returns a formatted string containing all available details (i.e., PackageType, Method,
+// StatusCode, Message, and original error (if any)).
+func (e DetailedError) Error() string {
+	if e.Original == nil {
+		return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode)
+	}
+	return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod
new file mode 100644
index 00000000000..6f1fcd4a4db
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod
@@ -0,0 +1,11 @@
+module github.com/Azure/go-autorest/autorest
+
+go 1.12
+
+require (
+	github.com/Azure/go-autorest/autorest/adal v0.8.0
+	github.com/Azure/go-autorest/autorest/mocks v0.3.0
+	github.com/Azure/go-autorest/logger v0.1.0
+	github.com/Azure/go-autorest/tracing v0.5.0
+	golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum
new file mode 100644
index 00000000000..e0d94da0a25
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum
@@ -0,0 +1,30 @@
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I=
+github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 00000000000..6e8ed64eba1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,550 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerAuxAuthorization = "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. 
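+//
+// A minimal sketch (the decorator below is illustrative): stash decorators in
+// a context, then recover them with GetPrepareDecorators:
+//
+//	ctx := WithPrepareDecorators(context.Background(), []PrepareDecorator{WithUserAgent("example/1.0")})
+//	decorators := GetPrepareDecorators(ctx, WithNothing())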
+func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context {
+	if len(prepareDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator)
+}
+
+// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators.
+func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator {
+	inCtx := ctx.Value(ctxPrepareDecorators{})
+	if pd, ok := inCtx.([]PrepareDecorator); ok {
+		return pd
+	}
+	return defaultPrepareDecorators
+}
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
+// must not share or hold per-invocation state since Preparers may be shared and re-used.
+type Preparer interface {
+	Prepare(*http.Request) (*http.Request, error)
+}
+
+// PreparerFunc is a method that implements the Preparer interface.
+type PreparerFunc func(*http.Request) (*http.Request, error)
+
+// Prepare implements the Preparer interface on PreparerFunc.
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
+	return pf(r)
+}
+
+// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then affect the result.
+type PrepareDecorator func(Preparer) Preparer
+
+// CreatePreparer creates, decorates, and returns a Preparer.
+// Without decorators, the returned Preparer returns the passed http.Request unmodified.
+// Preparers are safe to share and re-use.
+func CreatePreparer(decorators ...PrepareDecorator) Preparer {
+	return DecoratePreparer(
+		Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
+		decorators...)
+}
+
+// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
+// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (change the http.Request and then pass it
+// along) or a post-decorator (pass the http.Request along and alter it on return).
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
+	for _, decorate := range decorators {
+		p = decorate(p)
+	}
+	return p
+}
+
+// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
+// It creates a Preparer from the decorators which it then applies to the passed http.Request.
+func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
+	if r == nil {
+		return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
+	}
+	return CreatePreparer(decorators...).Prepare(r)
+}
+
+// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
+// http.Request.
+func WithNothing() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			return p.Prepare(r)
+		})
+	}
+}
+
+// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
+// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
+// adding the header.
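+//
+// For example (the header name and value are illustrative):
+//
+//	req, err := Prepare(&http.Request{}, WithHeader("x-example-header", "value"))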
+func WithHeader(header string, value string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+				r.Header.Set(http.CanonicalHeaderKey(header), value)
+			}
+			return r, err
+		})
+	}
+}
+
+// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
+// the passed values. It canonicalizes the passed header names (via http.CanonicalHeaderKey) before
+// adding them.
+func WithHeaders(headers map[string]interface{}) PrepareDecorator {
+	h := ensureValueStrings(headers)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+
+				for name, value := range h {
+					r.Header.Set(http.CanonicalHeaderKey(name), value)
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the supplied token.
+func WithBearerAuthorization(token string) PrepareDecorator {
+	return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token))
+}
+
+// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value
+// is the passed contentType.
+func AsContentType(contentType string) PrepareDecorator {
+	return WithHeader(headerContentType, contentType)
+}
+
+// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the
+// passed string.
+func WithUserAgent(ua string) PrepareDecorator {
+	return WithHeader(headerUserAgent, ua)
+}
+
+// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
+// "application/x-www-form-urlencoded".
+func AsFormURLEncoded() PrepareDecorator {
+	return AsContentType(mimeTypeFormPost)
+}
+
+// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
+// "application/json".
+func AsJSON() PrepareDecorator {
+	return AsContentType(mimeTypeJSON)
+}
+
+// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header.
+func AsOctetStream() PrepareDecorator {
+	return AsContentType(mimeTypeOctetStream)
+}
+
+// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
+// decorator does not validate that the passed method string is a known HTTP method.
+func WithMethod(method string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r.Method = method
+			return p.Prepare(r)
+		})
+	}
+}
+
+// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
+func AsDelete() PrepareDecorator { return WithMethod("DELETE") }
+
+// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
+func AsGet() PrepareDecorator { return WithMethod("GET") }
+
+// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
+func AsHead() PrepareDecorator { return WithMethod("HEAD") }
+
+// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE.
+func AsMerge() PrepareDecorator { return WithMethod("MERGE") }
+
+// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
+func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
+
+// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL.
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var u *url.URL
+				if u, err = url.Parse(baseURL); err != nil {
+					return r, err
+				}
+				if u.Scheme == "" {
+					err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+				}
+				if err == nil {
+					r.URL = u
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBytes returns a PrepareDecorator that passes the supplied bytes directly to the request body.
+func WithBytes(input *[]byte) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if input == nil {
+					return r, fmt.Errorf("Input Bytes was nil")
+				}
+
+				r.ContentLength = int64(len(*input))
+				r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(urlParameters)
+	for key, value := range parameters {
+		baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+	}
+	return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that URL encodes the passed url.Values (e.g.,
+// bar=baz&foo=quux) into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+				r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that writes the supplied form parameters into
+// the http.Request body as multipart form data.
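+//
+// A minimal sketch (field names and contents are illustrative); io.ReadCloser
+// values are written as form files, everything else as plain fields:
+//
+//	form := map[string]interface{}{
+//		"description": "example",
+//		"file":        ioutil.NopCloser(strings.NewReader("file contents")),
+//	}
+//	req, err := Prepare(&http.Request{}, WithMultiPartFormData(form))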
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var body bytes.Buffer
+				writer := multipart.NewWriter(&body)
+				for key, value := range formDataParameters {
+					if rc, ok := value.(io.ReadCloser); ok {
+						var fd io.Writer
+						if fd, err = writer.CreateFormFile(key, key); err != nil {
+							return r, err
+						}
+						if _, err = io.Copy(fd, rc); err != nil {
+							return r, err
+						}
+					} else {
+						if err = writer.WriteField(key, ensureValueString(value)); err != nil {
+							return r, err
+						}
+					}
+				}
+				if err = writer.Close(); err != nil {
+					return r, err
+				}
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+				r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
+				r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+				r.ContentLength = int64(body.Len())
+				return r, err
+			}
+			return r, err
+		})
+	}
+}
+
+// WithFile returns a PrepareDecorator that sends the contents of the supplied file in the request body.
+func WithFile(f io.ReadCloser) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := ioutil.ReadAll(f)
+				if err != nil {
+					return r, err
+				}
+				r.Body = ioutil.NopCloser(bytes.NewReader(b))
+				r.ContentLength = int64(len(b))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
+// and sets the Content-Length header.
+func WithBool(v bool) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
+// request and sets the Content-Length header.
+func WithFloat32(v float32) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
+// request and sets the Content-Length header.
+func WithFloat64(v float64) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
+// and sets the Content-Length header.
+func WithInt32(v int32) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
+// and sets the Content-Length header.
+func WithInt64(v int64) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
+// and sets the Content-Length header.
+func WithString(v string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				r.ContentLength = int64(len(v))
+				r.Body = ioutil.NopCloser(strings.NewReader(v))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
+// request and sets the Content-Length header.
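+//
+// A minimal sketch (the payload below is illustrative):
+//
+//	payload := map[string]string{"name": "example"}
+//	req, err := Prepare(&http.Request{}, AsPost(), AsJSON(), WithJSON(payload))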
+func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. +func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
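+//
+// A minimal sketch (the URL and parameter values are illustrative):
+//
+//	req, err := Prepare(&http.Request{},
+//		WithBaseURL("https://example.com"),
+//		WithPathParameters("/things/{thingID}", map[string]interface{}{"thingID": "42"}))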
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(pathParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					path = strings.Replace(path, "{"+key+"}", value, -1)
+				}
+
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+func parseURL(u *url.URL, path string) (*url.URL, error) {
+	p := strings.TrimRight(u.String(), "/")
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	return url.Parse(p + path)
+}
+
+// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
+// given in the supplied map (i.e., key=value).
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
+	parameters := MapToValues(queryParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
+				}
+				v := r.URL.Query()
+				for key, value := range parameters {
+					for i := range value {
+						d, err := url.QueryUnescape(value[i])
+						if err != nil {
+							return r, err
+						}
+						value[i] = d
+					}
+					v[key] = value
+				}
+				r.URL.RawQuery = v.Encode()
+			}
+			return r, err
+		})
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
new file mode 100644
index 00000000000..349e1963a2c
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -0,0 +1,269 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must ensure they do not share or
+// hold state since Responders may be shared and re-used.
+type Responder interface {
+	Respond(*http.Response) error
+}
+
+// ResponderFunc is a method that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+	return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along then react.
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-use: it depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+	for _, decorate := range decorators {
+		r = decorate(r)
+	}
+	return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators and then applies it to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+	if r == nil {
+		return nil
+	}
+	return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it unexamined
+// to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			return r.Respond(resp)
+		})
+	}
+}
+
+// ByCopying returns a RespondDecorator that copies the contents of the http.Response Body into the
+// passed bytes.Buffer as the Body is read.
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				resp.Body = TeeReadCloser(resp.Body, b)
+			}
+			return err
+		})
+	}
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+					return fmt.Errorf("Error discarding the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
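+//
+// A typical respond-chain sketch; resp and result below are illustrative placeholders:
+//
+//	err := autorest.Respond(resp,
+//		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+//		autorest.ByUnmarshallingJSON(&result),
+//		autorest.ByClosing())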
+func ByClosing() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
+// it closes the response body if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err != nil && resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingBytes returns a RespondDecorator that copies the bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				bytes, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					*v = bytes
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				// Some responses might include a BOM, remove for successful unmarshalling
+				b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else if len(strings.Trim(string(b), " ")) > 0 {
+					errInner = json.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					errInner = xml.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, the response body is fully read into a buffer and
+// presented in the returned error, as well as in the response body.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+				if resp.Body != nil {
+					defer resp.Body.Close()
+					b, _ := ioutil.ReadAll(resp.Body)
+					derr.ServiceError = b
+					resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+				err = derr
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 00000000000..fa11dbed79b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,52 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+	return &RetriableRequest{req: req}
+}
+
+// Request returns the wrapped HTTP request.
+func (rr *RetriableRequest) Request() *http.Request {
+	return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+	// fall back to making a copy (only do this once)
+	b := []byte{}
+	if rr.req.ContentLength > 0 {
+		b = make([]byte, rr.req.ContentLength)
+		_, err = io.ReadFull(rr.req.Body, b)
+		if err != nil {
+			return err
+		}
+	} else {
+		b, err = ioutil.ReadAll(rr.req.Body)
+		if err != nil {
+			return err
+		}
+	}
+	rr.br = bytes.NewReader(b)
+	rr.req.Body = ioutil.NopCloser(rr.br)
+	return err
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 00000000000..7143cc61b58
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,54 @@
+// +build !go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.br != nil {
+			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 00000000000..ae15c6bf962
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,66 @@
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
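+//
+// A minimal retry-loop sketch; sender is an illustrative Sender and the attempt count
+// is arbitrary. Prepare must be called before each send so the body can be replayed:
+//
+//	rr := autorest.NewRetriableRequest(req)
+//	for attempt := 0; attempt < 3; attempt++ {
+//		if err := rr.Prepare(); err != nil {
+//			break
+//		}
+//		resp, err = sender.Do(rr.Request())
+//		if err == nil {
+//			break
+//		}
+//	}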
+type RetriableRequest struct {
+	req *http.Request
+	rc  io.ReadCloser
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.rc != nil {
+			rr.req.Body = rr.rc
+		} else if rr.br != nil {
+			_, err = rr.br.Seek(0, io.SeekStart)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.req.GetBody != nil {
+			// this will allow us to preserve the body without having to
+			// make a copy. note we need to do this on each iteration
+			rr.rc, err = rr.req.GetBody()
+			if err != nil {
+				return err
+			}
+		} else if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.GetBody = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 00000000000..5e595d7b1a3
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,407 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"net/http/cookiejar"
+	"strconv"
+	"time"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+	if len(sendDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+	inCtx := ctx.Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		return sd
+	}
+	return defaultSendDecorators
+}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(tls.RenegotiateNever), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the Sender before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the Sender before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+func sender(renegotiation tls.RenegotiationSupport) Sender {
+	// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+	defaultTransport := http.DefaultTransport.(*http.Transport)
+	transport := &http.Transport{
+		Proxy:                 defaultTransport.Proxy,
+		DialContext:           defaultTransport.DialContext,
+		MaxIdleConns:          defaultTransport.MaxIdleConns,
+		IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+		TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+		ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+		TLSClientConfig: &tls.Config{
+			MinVersion:    tls.VersionTLS12,
+			Renegotiation: renegotiation,
+		},
+	}
+	var roundTripper http.RoundTripper = transport
+	if tracing.IsEnabled() {
+		roundTripper = tracing.NewTransport(transport)
+	}
+	j, _ := cookiejar.New(nil)
+	return &http.Client{Jar: j, Transport: roundTripper}
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be canceled through the request's context; if
+// canceled, no further Senders are invoked.
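+//
+// A brief sketch; client and req below are illustrative placeholders:
+//
+//	resp, err := autorest.SendWithSender(client, req,
+//		autorest.AfterDelay(2*time.Second))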
+func AfterDelay(d time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if !DelayForBackoff(d, 0, r.Context().Done()) { + return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") + } + return s.Do(r) + }) + } +} + +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. +func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. +func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequestWithContext(r.Context(), resp) + + for err == nil && ResponseHasStatusCode(resp, codes...) 
{ + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. +// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) + }) + } +} + +// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the +// specified number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater +// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. +func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) + }) + } +} + +func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + for attempt := 0; attempt < attempts+1; { + err = rr.Prepare() + if err != nil { + return + } + resp, err = s.Do(rr.Request()) + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) 
|| IsTokenRefreshError(err) {
+			return resp, err
+		}
+		delayed := DelayWithRetryAfter(resp, r.Context().Done())
+		if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) {
+			return resp, r.Context().Err()
+		}
+		// when count429 == false don't count a 429 against the number
+		// of attempts so that we continue to retry until it succeeds
+		if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
+			attempt++
+		}
+	}
+	return resp, err
+}
+
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
+// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
+// The function returns true after successfully waiting for the specified duration. If there is
+// no Retry-After header or the wait is cancelled the return value is false.
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+	if resp == nil {
+		return false
+	}
+	var dur time.Duration
+	ra := resp.Header.Get("Retry-After")
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		dur = time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		dur = t.Sub(time.Now())
+	}
+	if dur > 0 {
+		select {
+		case <-time.After(dur):
+			return true
+		case <-cancel:
+			return false
+		}
+	}
+	return false
+}
+
+// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
+// to or greater than the specified duration, exponentially backing off between requests using the
+// supplied backoff time.Duration (which may be zero). Retrying may be canceled by cancelling the
+// context on the http.Request.
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			end := time.Now().Add(d)
+			for attempt := 0; time.Now().Before(end); attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
+func WithLogging(logger *log.Logger) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			logger.Printf("Sending %s %s", r.Method, r.URL)
+			resp, err := s.Do(r)
+			if err != nil {
+				logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
+			} else {
+				logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
+// the passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can
+// be set to zero for no delay. The delay may be canceled by closing the passed channel. If
+// terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based
+// attempt count.
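+// For example, with backoff = 2s the delay is 2s for attempt 0, 4s for attempt 1, and
+// 8s for attempt 2 (i.e., backoff * 2^attempt).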
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// the passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can
+// be set to zero for no delay. To cap the maximum possible delay specify a value greater than zero
+// for cap. The delay may be canceled by closing the passed channel. If terminated early, returns
+// false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based
+// attempt count.
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool {
+	d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
+	if cap > 0 && d > cap {
+		d = cap
+	}
+	select {
+	case <-time.After(d):
+		return true
+	case <-cancel:
+		return false
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
new file mode 100644
index 00000000000..b9d6a27ea92
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go new file mode 100644 index 00000000000..86694bd2555 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go @@ -0,0 +1,152 @@ +/* +Package to provides helpers to ease working with pointer values of marshalled structures. +*/ +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// String returns a string value for the passed string pointer. It returns the empty string if the +// pointer is nil. +func String(s *string) string { + if s != nil { + return *s + } + return "" +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil +// slice if the pointer is nil. 
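+//
+// A small sketch of the pointer helpers; the values below are illustrative:
+//
+//	p := to.StringSlicePtr([]string{"a", "b"}) // *[]string
+//	s := to.StringSlice(p)                     // []string{"a", "b"}
+//	s = to.StringSlice(nil)                    // nil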
+func StringSlice(s *[]string) []string {
+	if s != nil {
+		return *s
+	}
+	return nil
+}
+
+// StringSlicePtr returns a pointer to the passed string slice.
+func StringSlicePtr(s []string) *[]string {
+	return &s
+}
+
+// StringMap returns a map of strings built from the map of string pointers. The empty string is
+// used for nil pointers.
+func StringMap(msp map[string]*string) map[string]string {
+	ms := make(map[string]string, len(msp))
+	for k, sp := range msp {
+		if sp != nil {
+			ms[k] = *sp
+		} else {
+			ms[k] = ""
+		}
+	}
+	return ms
+}
+
+// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings.
+func StringMapPtr(ms map[string]string) *map[string]*string {
+	msp := make(map[string]*string, len(ms))
+	for k, s := range ms {
+		msp[k] = StringPtr(s)
+	}
+	return &msp
+}
+
+// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil.
+func Bool(b *bool) bool {
+	if b != nil {
+		return *b
+	}
+	return false
+}
+
+// BoolPtr returns a pointer to the passed bool.
+func BoolPtr(b bool) *bool {
+	return &b
+}
+
+// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil.
+func Int(i *int) int {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// IntPtr returns a pointer to the passed int.
+func IntPtr(i int) *int {
+	return &i
+}
+
+// Int32 returns an int32 value for the passed int32 pointer. It returns 0 if the pointer is nil.
+func Int32(i *int32) int32 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int32Ptr returns a pointer to the passed int32.
+func Int32Ptr(i int32) *int32 {
+	return &i
+}
+
+// Int64 returns an int64 value for the passed int64 pointer. It returns 0 if the pointer is nil.
+func Int64(i *int64) int64 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int64Ptr returns a pointer to the passed int64.
+func Int64Ptr(i int64) *int64 {
+	return &i
+}
+
+// Float32 returns a float32 value for the passed float32 pointer. It returns 0.0 if the pointer
+// is nil.
+func Float32(i *float32) float32 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float32Ptr returns a pointer to the passed float32.
+func Float32Ptr(i float32) *float32 {
+	return &i
+}
+
+// Float64 returns a float64 value for the passed float64 pointer. It returns 0.0 if the pointer
+// is nil.
+func Float64(i *float64) float64 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float64Ptr returns a pointer to the passed float64.
+func Float64Ptr(i float64) *float64 {
+	return &i
+}
+
+// ByteSlicePtr returns a pointer to the passed byte slice.
+func ByteSlicePtr(b []byte) *[]byte {
+	return &b
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
new file mode 100644
index 00000000000..a2054be36cd
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/autorest/to
+
+go 1.12
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
new file mode 100644
index 00000000000..08cf11c1189
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -0,0 +1,228 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. +func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. 
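+//
+// A brief sketch; keys and values below are illustrative. Slice values become
+// repeated query parameters:
+//
+//	v := autorest.MapToValues(map[string]interface{}{
+//		"api-version": "2017-01-01",
+//		"ids":         []int{1, 2},
+//	})
+//	// v.Encode() == "api-version=2017-01-01&ids=1&ids=2"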
+func MapToValues(m map[string]interface{}) url.Values {
+	v := url.Values{}
+	for key, value := range m {
+		x := reflect.ValueOf(value)
+		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+			for i := 0; i < x.Len(); i++ {
+				v.Add(key, ensureValueString(x.Index(i)))
+			}
+		} else {
+			v.Add(key, ensureValueString(value))
+		}
+	}
+	return v
+}
+
+// AsStringSlice method converts interface{} to []string. It expects the parameter passed to be a
+// slice or array of a type whose underlying type is string.
+func AsStringSlice(s interface{}) ([]string, error) {
+	v := reflect.ValueOf(s)
+	if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+		return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
+	}
+	stringSlice := make([]string, 0, v.Len())
+
+	for i := 0; i < v.Len(); i++ {
+		stringSlice = append(stringSlice, v.Index(i).String())
+	}
+	return stringSlice, nil
+}
+
+// String method converts interface v to string. If interface is a list, it
+// joins list elements using the separator. Note that only sep[0] will be used for
+// joining if any separator is specified.
+func String(v interface{}, sep ...string) string {
+	if len(sep) == 0 {
+		return ensureValueString(v)
+	}
+	stringSlice, ok := v.([]string)
+	if !ok {
+		var err error
+		stringSlice, err = AsStringSlice(v)
+		if err != nil {
+			panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
+		}
+	}
+	return ensureValueString(strings.Join(stringSlice, sep[0]))
+}
+
+// Encode method encodes url path and query parameters.
+func Encode(location string, v interface{}, sep ...string) string {
+	s := String(v, sep...)
+	switch strings.ToLower(location) {
+	case "path":
+		return pathEscape(s)
+	case "query":
+		return queryEscape(s)
+	default:
+		return s
+	}
+}
+
+func pathEscape(s string) string {
+	return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
+}
+
+func queryEscape(s string) string {
+	return url.QueryEscape(s)
+}
+
+// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
+// This is mainly useful for long-running operations that use the Azure-AsyncOperation
+// header, so we change the initial PUT into a GET to retrieve the final result.
+func ChangeToGet(req *http.Request) *http.Request {
+	req.Method = "GET"
+	req.Body = nil
+	req.ContentLength = 0
+	req.Header.Del("Content-Length")
+	return req
+}
+
+// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError
+// interface. If err is a DetailedError it will walk the chain of Original errors.
+func IsTokenRefreshError(err error) bool {
+	if _, ok := err.(adal.TokenRefreshError); ok {
+		return true
+	}
+	if de, ok := err.(DetailedError); ok {
+		return IsTokenRefreshError(de.Original)
+	}
+	return false
+}
+
+// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false
+// if it's not. If the error doesn't implement the net.Error interface the return value is true.
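+//
+// A short sketch of gating a retry on the error kind; err below is illustrative:
+//
+//	if err != nil && !autorest.IsTemporaryNetworkError(err) {
+//		return nil, err // a permanent network failure; don't retry
+//	}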
+func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 00000000000..56a29b2c5d0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v13.3.0" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod new file mode 100644 index 00000000000..f22ed56bcde --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/logger + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 00000000000..da09f394c5d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,328 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. 
The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. 
The request body, if set, is logged at level LogDebug or higher.
+ // Custom filters can be specified to exclude URL, header, and/or body content from the log.
+ // By default no request content is excluded.
+ WriteRequest(req *http.Request, filter Filter)
+
+ // WriteResponse writes the specified HTTP response to the logger if the log level is greater than
+ // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
+ // Custom filters can be specified to exclude URL, header, and/or body content from the log.
+ // By default no response content is excluded.
+ WriteResponse(resp *http.Response, filter Filter)
+}
+
+// Instance is the default log writer initialized during package init.
+// This can be replaced with a custom implementation as required.
+var Instance Writer
+
+// default log level
+var logLevel = LogNone
+
+// Level returns the value specified in AZURE_GO_SDK_LOG_LEVEL.
+// If no value was specified the default value is LogNone.
+// Custom loggers can call this to retrieve the configured log level.
+func Level() LevelType {
+ return logLevel
+}
+
+func init() {
+ // separated for testing purposes
+ initDefaultLogger()
+}
+
+func initDefaultLogger() {
+ // init with nilLogger so callers don't have to do a nil check on Instance
+ Instance = nilLogger{}
+ llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL"))
+ if llStr == "" {
+ return
+ }
+ var err error
+ logLevel, err = ParseLevel(llStr)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error())
+ return
+ }
+ if logLevel == LogNone {
+ return
+ }
+ // default to stderr
+ dest := os.Stderr
+ lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE")
+ if strings.EqualFold(lfStr, "stdout") {
+ dest = os.Stdout
+ } else if lfStr != "" {
+ lf, err := os.Create(lfStr)
+ if err == nil {
+ dest = lf
+ } else {
+ fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error())
+ }
+ }
+ Instance = fileLogger{
+ logLevel: logLevel,
+ mu: &sync.Mutex{},
+ logFile: dest,
+ }
+}
+
+// the nil logger does nothing
+type nilLogger struct{}
+
+func (nilLogger) Writeln(LevelType, string) {}
+
+func (nilLogger) Writef(LevelType, string, ...interface{}) {}
+
+func (nilLogger) WriteRequest(*http.Request, Filter) {}
+
+func (nilLogger) WriteResponse(*http.Response, Filter) {}
+
+// An os.File is used instead of a Logger so the stream can be flushed after every write.
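+//
+// fileLogger is installed as the package-level Instance by initDefaultLogger
+// when logging is enabled through the environment, e.g. (file path illustrative):
+//
+//	AZURE_GO_SDK_LOG_LEVEL=debug AZURE_GO_SDK_LOG_FILE=/tmp/azure-sdk.log ./yourapp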
+type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
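A usage sketch for the `tracing` package vendored below: callers supply their own
tracer by implementing the three-method `Tracer` interface from tracing.go and
registering it. The `noopTracer` type and its method bodies here are illustrative,
not part of the vendored code:

	type noopTracer struct{}

	func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper        { return base }
	func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
	func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

	func init() { tracing.Register(noopTracer{}) }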
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 00000000000..25c34c1085a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 00000000000..0e7a6e96254 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. +func IsEnabled() bool { + return tracer != nil +} + +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. +func NewTransport(base *http.Transport) http.RoundTripper { + if tracer != nil { + return tracer.NewTransport(base) + } + return nil +} + +// StartSpan starts a trace span with the specified name, associating it with the +// provided context. Has no effect if a Tracer has not been registered. +func StartSpan(ctx context.Context, name string) context.Context { + if tracer != nil { + return tracer.StartSpan(ctx, name) + } + return ctx +} + +// EndSpan ends a previously started span stored in the context. +// Has no effect if a Tracer has not been registered. +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + if tracer != nil { + tracer.EndSpan(ctx, httpStatusCode, err) + } +} diff --git a/vendor/github.com/DataDog/datadog-go/LICENSE.txt b/vendor/github.com/DataDog/datadog-go/LICENSE.txt new file mode 100644 index 00000000000..97cd06d7fb1 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2015 Datadog, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/DataDog/datadog-go/statsd/README.md b/vendor/github.com/DataDog/datadog-go/statsd/README.md
new file mode 100644
index 00000000000..a2bca43b9af
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/statsd/README.md
@@ -0,0 +1,5 @@
+## Overview
+
+Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags
+and histograms.
+
diff --git a/vendor/github.com/DataDog/datadog-go/statsd/options.go b/vendor/github.com/DataDog/datadog-go/statsd/options.go
new file mode 100644
index 00000000000..2c5a59cd53f
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/statsd/options.go
@@ -0,0 +1,109 @@
+package statsd
+
+import "time"
+
+var (
+ // DefaultNamespace is the default value for the Namespace option
+ DefaultNamespace = ""
+ // DefaultTags is the default value for the Tags option
+ DefaultTags = []string{}
+ // DefaultBuffered is the default value for the Buffered option
+ DefaultBuffered = false
+ // DefaultMaxMessagesPerPayload is the default value for the MaxMessagesPerPayload option
+ DefaultMaxMessagesPerPayload = 16
+ // DefaultAsyncUDS is the default value for the AsyncUDS option
+ DefaultAsyncUDS = false
+ // DefaultWriteTimeoutUDS is the default value for the WriteTimeoutUDS option
+ DefaultWriteTimeoutUDS = 1 * time.Millisecond
+)
+
+// Options contains the configuration options for a client.
+type Options struct {
+ // Namespace to prepend to the name of every metric, event and service check.
+ Namespace string
+ // Tags are global tags to be applied to every metric, event and service check.
+ Tags []string
+ // Buffered allows packing multiple DogStatsD messages in one payload. Messages will be buffered
+ // until the payload contains MaxMessagesPerPayload metrics, events and/or service
+ // checks, or until 100ms after the payload started to be built.
+ Buffered bool
+ // MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain.
+ // Note that this option only takes effect when the client is buffered.
+ MaxMessagesPerPayload int
+ // AsyncUDS allows switching between async and blocking mode for UDS.
+ // Blocking mode allows for error checking but does not guarantee that calls won't block the execution.
+ AsyncUDS bool
+ // WriteTimeoutUDS is the timeout after which a UDS packet is dropped.
+ WriteTimeoutUDS time.Duration
+}
+
+func resolveOptions(options []Option) (*Options, error) {
+ o := &Options{
+ Namespace: DefaultNamespace,
+ Tags: DefaultTags,
+ Buffered: DefaultBuffered,
+ MaxMessagesPerPayload: DefaultMaxMessagesPerPayload,
+ AsyncUDS: DefaultAsyncUDS,
+ WriteTimeoutUDS: DefaultWriteTimeoutUDS,
+ }
+
+ for _, option := range options {
+ err := option(o)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// Option is a client option. Can return an error if validation fails.
+type Option func(*Options) error
+
+// WithNamespace sets the Namespace option.
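+//
+// For example, options can be combined when building a client (the address and
+// values mirror the package documentation and are illustrative):
+//
+//	c, err := New("127.0.0.1:8125",
+//		WithNamespace("flubber."),
+//		WithTags([]string{"us-east-1a"}))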
+func WithNamespace(namespace string) Option {
+ return func(o *Options) error {
+ o.Namespace = namespace
+ return nil
+ }
+}
+
+// WithTags sets the Tags option.
+func WithTags(tags []string) Option {
+ return func(o *Options) error {
+ o.Tags = tags
+ return nil
+ }
+}
+
+// Buffered sets the Buffered option.
+func Buffered() Option {
+ return func(o *Options) error {
+ o.Buffered = true
+ return nil
+ }
+}
+
+// WithMaxMessagesPerPayload sets the MaxMessagesPerPayload option.
+func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option {
+ return func(o *Options) error {
+ o.MaxMessagesPerPayload = maxMessagesPerPayload
+ return nil
+ }
+}
+
+// WithAsyncUDS sets the AsyncUDS option.
+func WithAsyncUDS() Option {
+ return func(o *Options) error {
+ o.AsyncUDS = true
+ return nil
+ }
+}
+
+// WithWriteTimeoutUDS sets the WriteTimeoutUDS option.
+func WithWriteTimeoutUDS(writeTimeoutUDS time.Duration) Option {
+ return func(o *Options) error {
+ o.WriteTimeoutUDS = writeTimeoutUDS
+ return nil
+ }
+}
diff --git a/vendor/github.com/DataDog/datadog-go/statsd/statsd.go b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go
new file mode 100644
index 00000000000..71a113cfcef
--- /dev/null
+++ b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go
@@ -0,0 +1,757 @@
+// Copyright 2013 Ooyala, Inc.
+
+/*
+Package statsd provides a Go dogstatsd client. Dogstatsd extends the popular statsd,
+adding tags and histograms and pushing upstream to Datadog.
+
+Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD.
+
+Example Usage:
+
+ // Create the client
+ c, err := statsd.New("127.0.0.1:8125")
+ if err != nil {
+ log.Fatal(err)
+ }
+ // Prefix every metric with the app name
+ c.Namespace = "flubber."
+ // Send the EC2 availability zone as a tag with every metric
+ c.Tags = append(c.Tags, "us-east-1a")
+ err = c.Gauge("request.duration", 1.2, nil, 1)
+
+statsd is based on go-statsd-client.
+*/
+package statsd
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+/*
+OptimalPayloadSize defines the optimal payload size for a UDP datagram. 1432 bytes
+is optimal for regular networks with an MTU of 1500 so datagrams don't get
+fragmented. It's generally recommended not to fragment UDP datagrams as losing
+a single fragment will cause the entire datagram to be lost.
+
+This can be increased if your network has a greater MTU or you don't mind UDP
+datagrams getting fragmented. The practical limit is MaxUDPPayloadSize.
+*/
+const OptimalPayloadSize = 1432
+
+/*
+MaxUDPPayloadSize defines the maximum payload size for a UDP datagram.
+Its value comes from the calculation: 65535 bytes max UDP datagram size -
+8-byte UDP header - 60-byte max IP headers;
+any payload greater than that will see frames being cut out.
+*/
+const MaxUDPPayloadSize = 65467
+
+/*
+UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket
+traffic instead of UDP.
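+
+For example, New("unix:///path/to/socket") selects one of the UDS writers in
+uds_async.go and uds_blocking.go instead of the UDP writer.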
+*/
+const UnixAddressPrefix = "unix://"
+
+// Client-side entity ID injection for container tagging
+const (
+ entityIDEnvName = "DD_ENTITY_ID"
+ entityIDTagName = "dd.internal.entity_id"
+)
+
+/*
+Stat suffixes
+*/
+var (
+ gaugeSuffix = []byte("|g")
+ countSuffix = []byte("|c")
+ histogramSuffix = []byte("|h")
+ distributionSuffix = []byte("|d")
+ decrSuffix = []byte("-1|c")
+ incrSuffix = []byte("1|c")
+ setSuffix = []byte("|s")
+ timingSuffix = []byte("|ms")
+)
+
+// A statsdWriter offers a standard interface regardless of the underlying
+// protocol. For now UDS and UDP writers are available.
+type statsdWriter interface {
+ Write(data []byte) (n int, err error)
+ SetWriteTimeout(time.Duration) error
+ Close() error
+}
+
+// A Client is a handle for sending messages to dogstatsd. It is safe to
+// use one Client from multiple goroutines simultaneously.
+type Client struct {
+ // Writer handles the underlying networking protocol
+ writer statsdWriter
+ // Namespace to prepend to all statsd calls
+ Namespace string
+ // Tags are global tags to be added to every statsd call
+ Tags []string
+ // SkipErrors turns off error passing and allows UDS to emulate UDP behaviour
+ SkipErrors bool
+ // BufferLength is the length of the buffer in commands.
+ bufferLength int
+ flushTime time.Duration
+ commands [][]byte
+ buffer bytes.Buffer
+ stop chan struct{}
+ sync.Mutex
+}
+
+// New returns a pointer to a new Client given an addr in the format "hostname:port" or
+// "unix:///path/to/socket".
+func New(addr string, options ...Option) (*Client, error) {
+ o, err := resolveOptions(options)
+ if err != nil {
+ return nil, err
+ }
+
+ var w statsdWriter
+
+ if !strings.HasPrefix(addr, UnixAddressPrefix) {
+ w, err = newUDPWriter(addr)
+ } else if o.AsyncUDS {
+ w, err = newAsyncUdsWriter(addr[len(UnixAddressPrefix)-1:])
+ } else {
+ w, err = newBlockingUdsWriter(addr[len(UnixAddressPrefix)-1:])
+ }
+ if err != nil {
+ return nil, err
+ }
+ w.SetWriteTimeout(o.WriteTimeoutUDS)
+
+ c := Client{
+ Namespace: o.Namespace,
+ Tags: o.Tags,
+ writer: w,
+ }
+
+ // Inject DD_ENTITY_ID as a constant tag if found
+ entityID := os.Getenv(entityIDEnvName)
+ if entityID != "" {
+ entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID)
+ c.Tags = append(c.Tags, entityTag)
+ }
+
+ if o.Buffered {
+ c.bufferLength = o.MaxMessagesPerPayload
+ c.commands = make([][]byte, 0, o.MaxMessagesPerPayload)
+ c.flushTime = time.Millisecond * 100
+ c.stop = make(chan struct{}, 1)
+ go c.watch()
+ }
+
+ return &c, nil
+}
+
+// NewWithWriter creates a new Client with the given writer. Writer is an
+// io.WriteCloser + SetWriteTimeout(time.Duration) error
+func NewWithWriter(w statsdWriter) (*Client, error) {
+ client := &Client{writer: w, SkipErrors: false}
+
+ // Inject DD_ENTITY_ID as a constant tag if found
+ entityID := os.Getenv(entityIDEnvName)
+ if entityID != "" {
+ entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID)
+ client.Tags = append(client.Tags, entityTag)
+ }
+
+ return client, nil
+}
+
+// NewBuffered returns a Client that buffers its output and sends it in chunks.
+// Buflen is the length of the buffer in number of commands.
+//
+// When addr is empty, the client will default to a UDP client and use the DD_AGENT_HOST
+// and (optionally) the DD_DOGSTATSD_PORT environment variables to build the target address.
+func NewBuffered(addr string, buflen int) (*Client, error) {
+ return New(addr, Buffered(), WithMaxMessagesPerPayload(buflen))
+}
+
+// format a message from its name, value, tags and rate.
Also adds global +// namespace and tags. +func (c *Client) format(name string, value interface{}, suffix []byte, tags []string, rate float64) []byte { + // preallocated buffer, stack allocated as long as it doesn't escape + buf := make([]byte, 0, 200) + + if c.Namespace != "" { + buf = append(buf, c.Namespace...) + } + buf = append(buf, name...) + buf = append(buf, ':') + + switch val := value.(type) { + case float64: + buf = strconv.AppendFloat(buf, val, 'f', 6, 64) + + case int64: + buf = strconv.AppendInt(buf, val, 10) + + case string: + buf = append(buf, val...) + + default: + // do nothing + } + buf = append(buf, suffix...) + + if rate < 1 { + buf = append(buf, "|@"...) + buf = strconv.AppendFloat(buf, rate, 'f', -1, 64) + } + + buf = appendTagString(buf, c.Tags, tags) + + // non-zeroing copy to avoid referencing a larger than necessary underlying array + return append([]byte(nil), buf...) +} + +// SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP. +func (c *Client) SetWriteTimeout(d time.Duration) error { + if c == nil { + return fmt.Errorf("Client is nil") + } + return c.writer.SetWriteTimeout(d) +} + +func (c *Client) watch() { + ticker := time.NewTicker(c.flushTime) + + for { + select { + case <-ticker.C: + c.Lock() + if len(c.commands) > 0 { + // FIXME: eating error here + c.flushLocked() + } + c.Unlock() + case <-c.stop: + ticker.Stop() + return + } + } +} + +func (c *Client) append(cmd []byte) error { + c.Lock() + defer c.Unlock() + c.commands = append(c.commands, cmd) + // if we should flush, lets do it + if len(c.commands) == c.bufferLength { + if err := c.flushLocked(); err != nil { + return err + } + } + return nil +} + +func (c *Client) joinMaxSize(cmds [][]byte, sep string, maxSize int) ([][]byte, []int) { + c.buffer.Reset() //clear buffer + + var frames [][]byte + var ncmds []int + sepBytes := []byte(sep) + sepLen := len(sep) + + elem := 0 + for _, cmd := range cmds { + needed := len(cmd) + + if elem != 0 { + needed = needed + sepLen + } + + if c.buffer.Len()+needed <= maxSize { + if elem != 0 { + c.buffer.Write(sepBytes) + } + c.buffer.Write(cmd) + elem++ + } else { + frames = append(frames, copyAndResetBuffer(&c.buffer)) + ncmds = append(ncmds, elem) + // if cmd is bigger than maxSize it will get flushed on next loop + c.buffer.Write(cmd) + elem = 1 + } + } + + //add whatever is left! if there's actually something + if c.buffer.Len() > 0 { + frames = append(frames, copyAndResetBuffer(&c.buffer)) + ncmds = append(ncmds, elem) + } + + return frames, ncmds +} + +func copyAndResetBuffer(buf *bytes.Buffer) []byte { + tmpBuf := make([]byte, buf.Len()) + copy(tmpBuf, buf.Bytes()) + buf.Reset() + return tmpBuf +} + +// Flush forces a flush of the pending commands in the buffer +func (c *Client) Flush() error { + if c == nil { + return fmt.Errorf("Client is nil") + } + c.Lock() + defer c.Unlock() + return c.flushLocked() +} + +// flush the commands in the buffer. Lock must be held by caller. +func (c *Client) flushLocked() error { + frames, flushable := c.joinMaxSize(c.commands, "\n", OptimalPayloadSize) + var err error + cmdsFlushed := 0 + for i, data := range frames { + _, e := c.writer.Write(data) + if e != nil { + err = e + break + } + cmdsFlushed += flushable[i] + } + + // clear the slice with a slice op, doesn't realloc + if cmdsFlushed == len(c.commands) { + c.commands = c.commands[:0] + } else { + //this case will cause a future realloc... + // drop problematic command though (sorry). 
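+ // c.commands[:cmdsFlushed] were sent; slicing from cmdsFlushed+1 skips the
+ // offending command so the next flush retries only the remaining ones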
+ c.commands = c.commands[cmdsFlushed+1:] + } + return err +} + +func (c *Client) sendMsg(msg []byte) error { + // return an error if message is bigger than MaxUDPPayloadSize + if len(msg) > MaxUDPPayloadSize { + return errors.New("message size exceeds MaxUDPPayloadSize") + } + + // if this client is buffered, then we'll just append this + if c.bufferLength > 0 { + return c.append(msg) + } + + _, err := c.writer.Write(msg) + + if c.SkipErrors { + return nil + } + return err +} + +// send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags. +func (c *Client) send(name string, value interface{}, suffix []byte, tags []string, rate float64) error { + if c == nil { + return fmt.Errorf("Client is nil") + } + if rate < 1 && rand.Float64() > rate { + return nil + } + data := c.format(name, value, suffix, tags, rate) + return c.sendMsg(data) +} + +// Gauge measures the value of a metric at a particular time. +func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error { + return c.send(name, value, gaugeSuffix, tags, rate) +} + +// Count tracks how many times something happened per second. +func (c *Client) Count(name string, value int64, tags []string, rate float64) error { + return c.send(name, value, countSuffix, tags, rate) +} + +// Histogram tracks the statistical distribution of a set of values on each host. +func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error { + return c.send(name, value, histogramSuffix, tags, rate) +} + +// Distribution tracks the statistical distribution of a set of values across your infrastructure. +func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error { + return c.send(name, value, distributionSuffix, tags, rate) +} + +// Decr is just Count of -1 +func (c *Client) Decr(name string, tags []string, rate float64) error { + return c.send(name, nil, decrSuffix, tags, rate) +} + +// Incr is just Count of 1 +func (c *Client) Incr(name string, tags []string, rate float64) error { + return c.send(name, nil, incrSuffix, tags, rate) +} + +// Set counts the number of unique elements in a group. +func (c *Client) Set(name string, value string, tags []string, rate float64) error { + return c.send(name, value, setSuffix, tags, rate) +} + +// Timing sends timing information, it is an alias for TimeInMilliseconds +func (c *Client) Timing(name string, value time.Duration, tags []string, rate float64) error { + return c.TimeInMilliseconds(name, value.Seconds()*1000, tags, rate) +} + +// TimeInMilliseconds sends timing information in milliseconds. +// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) +func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { + return c.send(name, value, timingSuffix, tags, rate) +} + +// Event sends the provided Event. +func (c *Client) Event(e *Event) error { + if c == nil { + return fmt.Errorf("Client is nil") + } + stat, err := e.Encode(c.Tags...) + if err != nil { + return err + } + return c.sendMsg([]byte(stat)) +} + +// SimpleEvent sends an event with the provided title and text. +func (c *Client) SimpleEvent(title, text string) error { + e := NewEvent(title, text) + return c.Event(e) +} + +// ServiceCheck sends the provided ServiceCheck. 
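+//
+// A short sketch (name and message illustrative):
+//
+//	sc := NewServiceCheck("app.is_up", Ok)
+//	sc.Message = "all good"
+//	err := c.ServiceCheck(sc)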
+func (c *Client) ServiceCheck(sc *ServiceCheck) error {
+ if c == nil {
+ return fmt.Errorf("Client is nil")
+ }
+ stat, err := sc.Encode(c.Tags...)
+ if err != nil {
+ return err
+ }
+ return c.sendMsg([]byte(stat))
+}
+
+// SimpleServiceCheck sends a service check with the provided name and status.
+func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) error {
+ sc := NewServiceCheck(name, status)
+ return c.ServiceCheck(sc)
+}
+
+// Close the client connection.
+func (c *Client) Close() error {
+ if c == nil {
+ return fmt.Errorf("Client is nil")
+ }
+ select {
+ case c.stop <- struct{}{}:
+ default:
+ }
+
+ // if this client is buffered, flush before closing the writer
+ if c.bufferLength > 0 {
+ if err := c.Flush(); err != nil {
+ return err
+ }
+ }
+
+ return c.writer.Close()
+}
+
+// Events support
+// EventAlertType and EventPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41
+// The reason why they got exported is so that client code can directly use the types.
+
+// EventAlertType is the alert type for events
+type EventAlertType string
+
+const (
+ // Info is the "info" AlertType for events
+ Info EventAlertType = "info"
+ // Error is the "error" AlertType for events
+ Error EventAlertType = "error"
+ // Warning is the "warning" AlertType for events
+ Warning EventAlertType = "warning"
+ // Success is the "success" AlertType for events
+ Success EventAlertType = "success"
+)
+
+// EventPriority is the event priority for events
+type EventPriority string
+
+const (
+ // Normal is the "normal" Priority for events
+ Normal EventPriority = "normal"
+ // Low is the "low" Priority for events
+ Low EventPriority = "low"
+)
+
+// An Event is an object that can be posted to your DataDog event stream.
+type Event struct {
+ // Title of the event. Required.
+ Title string
+ // Text is the description of the event. Required.
+ Text string
+ // Timestamp is a timestamp for the event. If not provided, the dogstatsd
+ // server will set this to the current time.
+ Timestamp time.Time
+ // Hostname for the event.
+ Hostname string
+ // AggregationKey groups this event with others of the same key.
+ AggregationKey string
+ // Priority of the event. Can be statsd.Low or statsd.Normal.
+ Priority EventPriority
+ // SourceTypeName is a source type for the event.
+ SourceTypeName string
+ // AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
+ // If absent, the default value applied by the dogstatsd server is Info.
+ AlertType EventAlertType
+ // Tags for the event.
+ Tags []string
+}
+
+// NewEvent creates a new event with the given title and text. Error checking
+// against these values is done at send-time, or upon running e.Check.
+func NewEvent(title, text string) *Event {
+ return &Event{
+ Title: title,
+ Text: text,
+ }
+}
+
+// Check verifies that an event is valid.
+func (e Event) Check() error {
+ if len(e.Title) == 0 {
+ return fmt.Errorf("statsd.Event title is required")
+ }
+ if len(e.Text) == 0 {
+ return fmt.Errorf("statsd.Event text is required")
+ }
+ return nil
+}
+
+// Encode returns the dogstatsd wire protocol representation for an event.
+// Tags may be passed which will be added to the encoded output but not to
+// the Event's list of tags, e.g. for default tags.
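+//
+// For example, an event with only a title and text encodes as:
+//
+//	NewEvent("deploy", "done").Encode() // "_e{6,4}:deploy|done"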
+func (e Event) Encode(tags ...string) (string, error) {
+ err := e.Check()
+ if err != nil {
+ return "", err
+ }
+ text := e.escapedText()
+
+ var buffer bytes.Buffer
+ buffer.WriteString("_e{")
+ buffer.WriteString(strconv.FormatInt(int64(len(e.Title)), 10))
+ buffer.WriteRune(',')
+ buffer.WriteString(strconv.FormatInt(int64(len(text)), 10))
+ buffer.WriteString("}:")
+ buffer.WriteString(e.Title)
+ buffer.WriteRune('|')
+ buffer.WriteString(text)
+
+ if !e.Timestamp.IsZero() {
+ buffer.WriteString("|d:")
+ buffer.WriteString(strconv.FormatInt(int64(e.Timestamp.Unix()), 10))
+ }
+
+ if len(e.Hostname) != 0 {
+ buffer.WriteString("|h:")
+ buffer.WriteString(e.Hostname)
+ }
+
+ if len(e.AggregationKey) != 0 {
+ buffer.WriteString("|k:")
+ buffer.WriteString(e.AggregationKey)
+
+ }
+
+ if len(e.Priority) != 0 {
+ buffer.WriteString("|p:")
+ buffer.WriteString(string(e.Priority))
+ }
+
+ if len(e.SourceTypeName) != 0 {
+ buffer.WriteString("|s:")
+ buffer.WriteString(e.SourceTypeName)
+ }
+
+ if len(e.AlertType) != 0 {
+ buffer.WriteString("|t:")
+ buffer.WriteString(string(e.AlertType))
+ }
+
+ writeTagString(&buffer, tags, e.Tags)
+
+ return buffer.String(), nil
+}
+
+// ServiceCheckStatus support
+type ServiceCheckStatus byte
+
+const (
+ // Ok is the "ok" ServiceCheck status
+ Ok ServiceCheckStatus = 0
+ // Warn is the "warning" ServiceCheck status
+ Warn ServiceCheckStatus = 1
+ // Critical is the "critical" ServiceCheck status
+ Critical ServiceCheckStatus = 2
+ // Unknown is the "unknown" ServiceCheck status
+ Unknown ServiceCheckStatus = 3
+)
+
+// A ServiceCheck is an object that contains the status of a DataDog service check.
+type ServiceCheck struct {
+ // Name of the service check. Required.
+ Name string
+ // Status of service check. Required.
+ Status ServiceCheckStatus
+ // Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd
+ // server will set this to the current time.
+ Timestamp time.Time
+ // Hostname for the serviceCheck.
+ Hostname string
+ // A message describing the current state of the serviceCheck.
+ Message string
+ // Tags for the serviceCheck.
+ Tags []string
+}
+
+// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking
+// against these values is done at send-time, or upon running sc.Check.
+func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck {
+ return &ServiceCheck{
+ Name: name,
+ Status: status,
+ }
+}
+
+// Check verifies that a service check is valid.
+func (sc ServiceCheck) Check() error {
+ if len(sc.Name) == 0 {
+ return fmt.Errorf("statsd.ServiceCheck name is required")
+ }
+ if byte(sc.Status) > 3 {
+ return fmt.Errorf("statsd.ServiceCheck status has invalid value")
+ }
+ return nil
+}
+
+// Encode returns the dogstatsd wire protocol representation for a service check.
+// Tags may be passed which will be added to the encoded output but not to
+// the ServiceCheck's list of tags, e.g. for default tags.
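+//
+// For example, a minimal service check encodes as:
+//
+//	NewServiceCheck("app.is_up", Ok).Encode() // "_sc|app.is_up|0"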
+func (sc ServiceCheck) Encode(tags ...string) (string, error) { + err := sc.Check() + if err != nil { + return "", err + } + message := sc.escapedMessage() + + var buffer bytes.Buffer + buffer.WriteString("_sc|") + buffer.WriteString(sc.Name) + buffer.WriteRune('|') + buffer.WriteString(strconv.FormatInt(int64(sc.Status), 10)) + + if !sc.Timestamp.IsZero() { + buffer.WriteString("|d:") + buffer.WriteString(strconv.FormatInt(int64(sc.Timestamp.Unix()), 10)) + } + + if len(sc.Hostname) != 0 { + buffer.WriteString("|h:") + buffer.WriteString(sc.Hostname) + } + + writeTagString(&buffer, tags, sc.Tags) + + if len(message) != 0 { + buffer.WriteString("|m:") + buffer.WriteString(message) + } + + return buffer.String(), nil +} + +func (e Event) escapedText() string { + return strings.Replace(e.Text, "\n", "\\n", -1) +} + +func (sc ServiceCheck) escapedMessage() string { + msg := strings.Replace(sc.Message, "\n", "\\n", -1) + return strings.Replace(msg, "m:", `m\:`, -1) +} + +func removeNewlines(str string) string { + return strings.Replace(str, "\n", "", -1) +} + +func writeTagString(w io.Writer, tagList1, tagList2 []string) { + // the tag lists may be shared with other callers, so we cannot modify + // them in any way (which means we cannot append to them either) + // therefore we must make an entirely separate copy just for this call + totalLen := len(tagList1) + len(tagList2) + if totalLen == 0 { + return + } + tags := make([]string, 0, totalLen) + tags = append(tags, tagList1...) + tags = append(tags, tagList2...) + + io.WriteString(w, "|#") + io.WriteString(w, removeNewlines(tags[0])) + for _, tag := range tags[1:] { + io.WriteString(w, ",") + io.WriteString(w, removeNewlines(tag)) + } +} + +func appendTagString(buf []byte, tagList1, tagList2 []string) []byte { + if len(tagList1) == 0 { + if len(tagList2) == 0 { + return buf + } + tagList1 = tagList2 + tagList2 = nil + } + + buf = append(buf, "|#"...) + buf = appendWithoutNewlines(buf, tagList1[0]) + for _, tag := range tagList1[1:] { + buf = append(buf, ',') + buf = appendWithoutNewlines(buf, tag) + } + for _, tag := range tagList2 { + buf = append(buf, ',') + buf = appendWithoutNewlines(buf, tag) + } + return buf +} + +func appendWithoutNewlines(buf []byte, s string) []byte { + // fastpath for strings without newlines + if strings.IndexByte(s, '\n') == -1 { + return append(buf, s...) + } + + for _, b := range []byte(s) { + if b != '\n' { + buf = append(buf, b) + } + } + return buf +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/udp.go b/vendor/github.com/DataDog/datadog-go/statsd/udp.go new file mode 100644 index 00000000000..9ddff421c70 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/udp.go @@ -0,0 +1,73 @@ +package statsd + +import ( + "errors" + "fmt" + "net" + "os" + "time" +) + +const ( + autoHostEnvName = "DD_AGENT_HOST" + autoPortEnvName = "DD_DOGSTATSD_PORT" + defaultUDPPort = "8125" +) + +// udpWriter is an internal class wrapping around management of UDP connection +type udpWriter struct { + conn net.Conn +} + +// New returns a pointer to a new udpWriter given an addr in the format "hostname:port". 
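+// When addr is empty, it falls back to the DD_AGENT_HOST and DD_DOGSTATSD_PORT
+// environment variables (see addressFromEnvironment below), with the port
+// defaulting to 8125.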
+func newUDPWriter(addr string) (*udpWriter, error) { + if addr == "" { + addr = addressFromEnvironment() + } + if addr == "" { + return nil, errors.New("No address passed and autodetection from environment failed") + } + + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, err + } + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + writer := &udpWriter{conn: conn} + return writer, nil +} + +// SetWriteTimeout is not needed for UDP, returns error +func (w *udpWriter) SetWriteTimeout(d time.Duration) error { + return errors.New("SetWriteTimeout: not supported for UDP connections") +} + +// Write data to the UDP connection with no error handling +func (w *udpWriter) Write(data []byte) (int, error) { + return w.conn.Write(data) +} + +func (w *udpWriter) Close() error { + return w.conn.Close() +} + +func (w *udpWriter) remoteAddr() net.Addr { + return w.conn.RemoteAddr() +} + +func addressFromEnvironment() string { + autoHost := os.Getenv(autoHostEnvName) + if autoHost == "" { + return "" + } + + autoPort := os.Getenv(autoPortEnvName) + if autoPort == "" { + autoPort = defaultUDPPort + } + + return fmt.Sprintf("%s:%s", autoHost, autoPort) +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds.go b/vendor/github.com/DataDog/datadog-go/statsd/uds.go new file mode 100644 index 00000000000..cc2537e000f --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/uds.go @@ -0,0 +1,11 @@ +package statsd + +import ( + "time" +) + +/* +UDSTimeout holds the default timeout for UDS socket writes, as they can get +blocking when the receiving buffer is full. +*/ +const defaultUDSTimeout = 1 * time.Millisecond diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds_async.go b/vendor/github.com/DataDog/datadog-go/statsd/uds_async.go new file mode 100644 index 00000000000..39d4ccb2344 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/uds_async.go @@ -0,0 +1,113 @@ +package statsd + +import ( + "fmt" + "net" + "time" +) + +// asyncUdsWriter is an internal class wrapping around management of UDS connection +type asyncUdsWriter struct { + // Address to send metrics to, needed to allow reconnection on error + addr net.Addr + // Established connection object, or nil if not connected yet + conn net.Conn + // write timeout + writeTimeout time.Duration + // datagramQueue is the queue of datagrams ready to be sent + datagramQueue chan []byte + stopChan chan struct{} +} + +// New returns a pointer to a new asyncUdsWriter given a socket file path as addr. 
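+//
+// A minimal usage sketch (the socket path is illustrative; the real path
+// depends on the agent configuration). Write only enqueues the datagram,
+// which sendLoop later flushes to the socket:
+//
+//	w, err := newAsyncUdsWriter("/var/run/datadog/dsd.socket")
+//	if err == nil {
+//		defer w.Close()
+//		w.Write([]byte("page.views:1|c")) // enqueued, sent asynchronously
+//	}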
+func newAsyncUdsWriter(addr string) (*asyncUdsWriter, error) { + udsAddr, err := net.ResolveUnixAddr("unixgram", addr) + if err != nil { + return nil, err + } + + writer := &asyncUdsWriter{ + addr: udsAddr, + conn: nil, + writeTimeout: defaultUDSTimeout, + // 8192 * 8KB = 65.5MB + datagramQueue: make(chan []byte, 8192), + stopChan: make(chan struct{}, 1), + } + + go writer.sendLoop() + return writer, nil +} + +func (w *asyncUdsWriter) sendLoop() { + for { + select { + case datagram := <-w.datagramQueue: + w.write(datagram) + case <-w.stopChan: + return + } + } +} + +// SetWriteTimeout allows the user to set a custom write timeout +func (w *asyncUdsWriter) SetWriteTimeout(d time.Duration) error { + w.writeTimeout = d + return nil +} + +// Write data to the UDS connection with write timeout and minimal error handling: +// create the connection if nil, and destroy it if the statsd server has disconnected +func (w *asyncUdsWriter) Write(data []byte) (int, error) { + select { + case w.datagramQueue <- data: + return len(data), nil + default: + return 0, fmt.Errorf("uds datagram queue is full (the agent might not be able to keep up)") + } +} + +// write writes the given data to the UDS. +// This function is **not** thread safe. +func (w *asyncUdsWriter) write(data []byte) (int, error) { + conn, err := w.ensureConnection() + if err != nil { + return 0, err + } + + conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) + n, err := conn.Write(data) + + if e, isNetworkErr := err.(net.Error); !isNetworkErr || !e.Temporary() { + // err is not temporary, Statsd server disconnected, retry connecting at next packet + w.unsetConnection() + return 0, e + } + + return n, err +} + +func (w *asyncUdsWriter) Close() error { + close(w.stopChan) + if w.conn != nil { + return w.conn.Close() + } + return nil +} + +func (w *asyncUdsWriter) ensureConnection() (net.Conn, error) { + if w.conn != nil { + return w.conn, nil + } + + newConn, err := net.Dial(w.addr.Network(), w.addr.String()) + if err != nil { + return nil, err + } + w.conn = newConn + return newConn, nil +} + +func (w *asyncUdsWriter) unsetConnection() { + w.conn = nil +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go b/vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go new file mode 100644 index 00000000000..70ee99ab31a --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go @@ -0,0 +1,92 @@ +package statsd + +import ( + "net" + "sync" + "time" +) + +// blockingUdsWriter is an internal class wrapping around management of UDS connection +type blockingUdsWriter struct { + // Address to send metrics to, needed to allow reconnection on error + addr net.Addr + // Established connection object, or nil if not connected yet + conn net.Conn + // write timeout + writeTimeout time.Duration + sync.RWMutex // used to lock conn / writer can replace it +} + +// New returns a pointer to a new blockingUdsWriter given a socket file path as addr. 
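+//
+// In contrast to asyncUdsWriter, Write here blocks the caller until the
+// datagram is written or the write deadline expires. A sketch (the socket
+// path is again illustrative):
+//
+//	w, err := newBlockingUdsWriter("/var/run/datadog/dsd.socket")
+//	if err == nil {
+//		w.SetWriteTimeout(100 * time.Millisecond)
+//		_, err = w.Write([]byte("page.views:1|c"))
+//	}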
+func newBlockingUdsWriter(addr string) (*blockingUdsWriter, error) { + udsAddr, err := net.ResolveUnixAddr("unixgram", addr) + if err != nil { + return nil, err + } + // Defer connection to first Write + writer := &blockingUdsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout} + return writer, nil +} + +// SetWriteTimeout allows the user to set a custom write timeout +func (w *blockingUdsWriter) SetWriteTimeout(d time.Duration) error { + w.writeTimeout = d + return nil +} + +// Write data to the UDS connection with write timeout and minimal error handling: +// create the connection if nil, and destroy it if the statsd server has disconnected +func (w *blockingUdsWriter) Write(data []byte) (int, error) { + conn, err := w.ensureConnection() + if err != nil { + return 0, err + } + + conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) + n, e := conn.Write(data) + + if err, isNetworkErr := e.(net.Error); !isNetworkErr || !err.Temporary() { + // Statsd server disconnected, retry connecting at next packet + w.unsetConnection() + return 0, e + } + return n, e +} + +func (w *blockingUdsWriter) Close() error { + if w.conn != nil { + return w.conn.Close() + } + return nil +} + +func (w *blockingUdsWriter) ensureConnection() (net.Conn, error) { + // Check if we've already got a socket we can use + w.RLock() + currentConn := w.conn + w.RUnlock() + + if currentConn != nil { + return currentConn, nil + } + + // Looks like we might need to connect - try again with write locking. + w.Lock() + defer w.Unlock() + if w.conn != nil { + return w.conn, nil + } + + newConn, err := net.Dial(w.addr.Network(), w.addr.String()) + if err != nil { + return nil, err + } + w.conn = newConn + return newConn, nil +} + +func (w *blockingUdsWriter) unsetConnection() { + w.Lock() + defer w.Unlock() + w.conn = nil +} diff --git a/vendor/github.com/DataDog/zstd/.travis.yml b/vendor/github.com/DataDog/zstd/.travis.yml index 629470cf61d..c5aa33dc3c2 100644 --- a/vendor/github.com/DataDog/zstd/.travis.yml +++ b/vendor/github.com/DataDog/zstd/.travis.yml @@ -1,10 +1,9 @@ -dist: xenial language: go go: + - 1.9.x - 1.10.x - 1.11.x - - 1.12.x os: - linux diff --git a/vendor/github.com/DataDog/zstd/README.md b/vendor/github.com/DataDog/zstd/README.md index ca5c68c7618..6c02e168a0c 100644 --- a/vendor/github.com/DataDog/zstd/README.md +++ b/vendor/github.com/DataDog/zstd/README.md @@ -2,8 +2,8 @@ [C Zstd Homepage](https://github.com/Cyan4973/zstd) -The current headers and C files are from *v1.3.8* (Commit -[470344d](https://github.com/facebook/zstd/releases/tag/v1.3.8)). +The current headers and C files are from *v1.3.4* (Commit +[2555975](https://github.com/facebook/zstd/releases/tag/v1.3.4)). ## Usage diff --git a/vendor/github.com/DataDog/zstd/bitstream.h b/vendor/github.com/DataDog/zstd/bitstream.h index d955bd677b5..f7f389fe0fa 100644 --- a/vendor/github.com/DataDog/zstd/bitstream.h +++ b/vendor/github.com/DataDog/zstd/bitstream.h @@ -1,7 +1,8 @@ /* ****************************************************************** bitstream Part of FSE library - Copyright (C) 2013-present, Yann Collet. + header file (to include) + Copyright (C) 2013-2017, Yann Collet. 
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) @@ -48,10 +49,21 @@ extern "C" { * Dependencies ******************************************/ #include "mem.h" /* unaligned access routines */ -#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */ #include "error_private.h" /* error codes and messages */ +/*-************************************* +* Debug +***************************************/ +#if defined(BIT_DEBUG) && (BIT_DEBUG>=1) +# include +#else +# ifndef assert +# define assert(condition) ((void)0) +# endif +#endif + + /*========================================= * Target specific =========================================*/ @@ -71,7 +83,8 @@ extern "C" { * A critical property of these streams is that they encode and decode in **reverse** direction. * So the first bit sequence you add will be the last to be read, like a LIFO stack. */ -typedef struct { +typedef struct +{ size_t bitContainer; unsigned bitPos; char* startPtr; @@ -105,7 +118,8 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); /*-******************************************** * bitStream decoding API (read backward) **********************************************/ -typedef struct { +typedef struct +{ size_t bitContainer; unsigned bitsConsumed; const char* ptr; @@ -222,8 +236,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, } /*! BIT_addBitsFast() : - * works only if `value` is _clean_, - * meaning all high bits above nbBits are 0 */ + * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */ MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits) { @@ -339,10 +352,17 @@ MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { - U32 const regMask = sizeof(bitContainer)*8 - 1; - /* if start > regMask, bitstream is corrupted, and result is undefined */ +#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008 /* experimental */ +# if defined(__x86_64__) + if (sizeof(bitContainer)==8) + return _bextr_u64(bitContainer, start, nbBits); + else +# endif + return _bextr_u32(bitContainer, start, nbBits); +#else assert(nbBits < BIT_MASK_SIZE); - return (bitContainer >> (start & regMask)) & BIT_mask[nbBits]; + return (bitContainer >> start) & BIT_mask[nbBits]; +#endif } MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) @@ -359,13 +379,9 @@ MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) * @return : value extracted */ MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) { - /* arbitrate between double-shift and shift+mask */ -#if 1 - /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8, - * bitstream is likely corrupted, and result is undefined */ +#if defined(__BMI__) && defined(__GNUC__) /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */ return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits); #else - /* this code path is slower on my os-x laptop */ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask); #endif @@ -389,7 +405,7 @@ MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) * Read (consume) next n bits from local register and update. * Pay attention to not read more than nbBits contained into local register. 
* @return : extracted value. */ -MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) { size_t const value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); @@ -398,7 +414,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) /*! BIT_readBitsFast() : * unsafe version; only works only if nbBits >= 1 */ -MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) { size_t const value = BIT_lookBitsFast(bitD, nbBits); assert(nbBits >= 1); diff --git a/vendor/github.com/DataDog/zstd/compiler.h b/vendor/github.com/DataDog/zstd/compiler.h index 7f561282ca3..e90a3bcde36 100644 --- a/vendor/github.com/DataDog/zstd/compiler.h +++ b/vendor/github.com/DataDog/zstd/compiler.h @@ -15,8 +15,6 @@ * Compiler specifics *********************************************************/ /* force inlining */ - -#if !defined(ZSTD_NO_INLINE) #if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # define INLINE_KEYWORD inline #else @@ -31,13 +29,6 @@ # define FORCE_INLINE_ATTR #endif -#else - -#define INLINE_KEYWORD -#define FORCE_INLINE_ATTR - -#endif - /** * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant * parameters. They must be inlined for the compiler to elimininate the constant @@ -86,9 +77,9 @@ * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. */ #ifndef DYNAMIC_BMI2 - #if ((defined(__clang__) && __has_attribute(__target__)) \ + #if (defined(__clang__) && __has_attribute(__target__)) \ || (defined(__GNUC__) \ - && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ + && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) \ && (defined(__x86_64__) || defined(_M_X86)) \ && !defined(__BMI2__) # define DYNAMIC_BMI2 1 @@ -97,35 +88,15 @@ #endif #endif -/* prefetch - * can be disabled, by declaring NO_PREFETCH build macro */ -#if defined(NO_PREFETCH) -# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ -# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ +/* prefetch */ +#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ +# include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ +# define PREFETCH(ptr) _mm_prefetch((const char*)ptr, _MM_HINT_T0) +#elif defined(__GNUC__) +# define PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0) #else -# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ -# include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ -# define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) -# define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1) -# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) -# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) -# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */) -# else -# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ -# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ -# endif -#endif /* NO_PREFETCH */ - -#define CACHELINE_SIZE 64 - -#define PREFETCH_AREA(p, s) { \ - const char* const _ptr = (const char*)(p); \ - size_t const _size = (size_t)(s); \ - size_t _pos; \ - for (_pos=0; _pos<_size; 
_pos+=CACHELINE_SIZE) { \ - PREFETCH_L2(_ptr + _pos); \ - } \ -} +# define PREFETCH(ptr) /* disabled */ +#endif /* disable warnings */ #ifdef _MSC_VER /* Visual Studio */ diff --git a/vendor/github.com/DataDog/zstd/cover.c b/vendor/github.com/DataDog/zstd/cover.c index b55bfb510b7..b5a3957a9b9 100644 --- a/vendor/github.com/DataDog/zstd/cover.c +++ b/vendor/github.com/DataDog/zstd/cover.c @@ -29,7 +29,6 @@ #include "mem.h" /* read */ #include "pool.h" #include "threading.h" -#include "cover.h" #include "zstd_internal.h" /* includes zstd.h */ #ifndef ZDICT_STATIC_LINKING_ONLY #define ZDICT_STATIC_LINKING_ONLY @@ -39,8 +38,7 @@ /*-************************************* * Constants ***************************************/ -#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) -#define DEFAULT_SPLITPOINT 1.0 +#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB)) /*-************************************* * Console display @@ -186,7 +184,7 @@ static void COVER_map_remove(COVER_map_t *map, U32 key) { } /** - * Destroys a map that is inited with COVER_map_init(). + * Destroyes a map that is inited with COVER_map_init(). */ static void COVER_map_destroy(COVER_map_t *map) { if (map->data) { @@ -205,8 +203,6 @@ typedef struct { size_t *offsets; const size_t *samplesSizes; size_t nbSamples; - size_t nbTrainSamples; - size_t nbTestSamples; U32 *suffix; size_t suffixSize; U32 *freqs; @@ -224,9 +220,9 @@ static COVER_ctx_t *g_ctx = NULL; /** * Returns the sum of the sample sizes. */ -size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) { +static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) { size_t sum = 0; - unsigned i; + size_t i; for (i = 0; i < nbSamples; ++i) { sum += samplesSizes[i]; } @@ -381,6 +377,14 @@ static void COVER_group(COVER_ctx_t *ctx, const void *group, ctx->suffix[dmerId] = freq; } +/** + * A segment is a range in the source as well as the score of the segment. + */ +typedef struct { + U32 begin; + U32 end; + U32 score; +} COVER_segment_t; /** * Selects the best segment in an epoch. @@ -490,10 +494,6 @@ static int COVER_checkParameters(ZDICT_cover_params_t parameters, if (parameters.d > parameters.k) { return 0; } - /* 0 < splitPoint <= 1 */ - if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){ - return 0; - } return 1; } @@ -531,44 +531,25 @@ static void COVER_ctx_destroy(COVER_ctx_t *ctx) { */ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, - unsigned d, double splitPoint) { + unsigned d) { const BYTE *const samples = (const BYTE *)samplesBuffer; const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); - /* Split samples into testing and training sets */ - const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples; - const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; - const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; - const size_t testSamplesSize = splitPoint < 1.0 ? 
COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; /* Checks */ if (totalSamplesSize < MAX(d, sizeof(U64)) || totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) { DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n", - (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20)); - return 0; - } - /* Check if there are at least 5 training samples */ - if (nbTrainSamples < 5) { - DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples); - return 0; - } - /* Check if there's testing sample */ - if (nbTestSamples < 1) { - DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples); + (U32)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20)); return 0; } /* Zero the context */ memset(ctx, 0, sizeof(*ctx)); - DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples, - (unsigned)trainingSamplesSize); - DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples, - (unsigned)testSamplesSize); + DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples, + (U32)totalSamplesSize); ctx->samples = samples; ctx->samplesSizes = samplesSizes; ctx->nbSamples = nbSamples; - ctx->nbTrainSamples = nbTrainSamples; - ctx->nbTestSamples = nbTestSamples; /* Partial suffix array */ - ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1; + ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1; ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); /* Maps index to the dmerID */ ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); @@ -582,7 +563,7 @@ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, ctx->freqs = NULL; ctx->d = d; - /* Fill offsets from the samplesSizes */ + /* Fill offsets from the samlesSizes */ { U32 i; ctx->offsets[0] = 0; @@ -600,17 +581,10 @@ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, for (i = 0; i < ctx->suffixSize; ++i) { ctx->suffix[i] = i; } - /* qsort doesn't take an opaque pointer, so pass as a global. - * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is. - */ + /* qsort doesn't take an opaque pointer, so pass as a global */ g_ctx = ctx; -#if defined(__OpenBSD__) - mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32), - (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); -#else qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); -#endif } DISPLAYLEVEL(2, "Computing frequencies\n"); /* For each dmer group (group of positions with the same first d bytes): @@ -639,11 +613,11 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, /* Divide the data up into epochs of equal size. * We will select at least one segment from each epoch. */ - const unsigned epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k / 4)); - const unsigned epochSize = (U32)(ctx->suffixSize / epochs); + const U32 epochs = (U32)(dictBufferCapacity / parameters.k); + const U32 epochSize = (U32)(ctx->suffixSize / epochs); size_t epoch; - DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", - epochs, epochSize); + DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs, + epochSize); /* Loop through the epochs until there are no more segments or the dictionary * is full. 
*/ @@ -670,7 +644,7 @@ static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); DISPLAYUPDATE( 2, "\r%u%% ", - (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); + (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); } DISPLAYLEVEL(2, "\r%79s\r", ""); return tail; @@ -684,7 +658,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( BYTE* const dict = (BYTE*)dictBuffer; COVER_ctx_t ctx; COVER_map_t activeDmers; - parameters.splitPoint = 1.0; + /* Initialize global data */ g_displayLevel = parameters.zParams.notificationLevel; /* Checks */ @@ -703,7 +677,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( } /* Initialize context and activeDmers */ if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, - parameters.d, parameters.splitPoint)) { + parameters.d)) { return ERROR(GENERIC); } if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { @@ -722,7 +696,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( samplesBuffer, samplesSizes, nbSamples, parameters.zParams); if (!ZSTD_isError(dictionarySize)) { DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", - (unsigned)dictionarySize); + (U32)dictionarySize); } COVER_ctx_destroy(&ctx); COVER_map_destroy(&activeDmers); @@ -730,65 +704,28 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( } } - - -size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, - const size_t *samplesSizes, const BYTE *samples, - size_t *offsets, - size_t nbTrainSamples, size_t nbSamples, - BYTE *const dict, size_t dictBufferCapacity) { - size_t totalCompressedSize = ERROR(GENERIC); - /* Pointers */ - ZSTD_CCtx *cctx; - ZSTD_CDict *cdict; - void *dst; - /* Local variables */ - size_t dstCapacity; - size_t i; - /* Allocate dst with enough space to compress the maximum sized sample */ - { - size_t maxSampleSize = 0; - i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0; - for (; i < nbSamples; ++i) { - maxSampleSize = MAX(samplesSizes[i], maxSampleSize); - } - dstCapacity = ZSTD_compressBound(maxSampleSize); - dst = malloc(dstCapacity); - } - /* Create the cctx and cdict */ - cctx = ZSTD_createCCtx(); - cdict = ZSTD_createCDict(dict, dictBufferCapacity, - parameters.zParams.compressionLevel); - if (!dst || !cctx || !cdict) { - goto _compressCleanup; - } - /* Compress each sample and sum their sizes (or error) */ - totalCompressedSize = dictBufferCapacity; - i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0; - for (; i < nbSamples; ++i) { - const size_t size = ZSTD_compress_usingCDict( - cctx, dst, dstCapacity, samples + offsets[i], - samplesSizes[i], cdict); - if (ZSTD_isError(size)) { - totalCompressedSize = ERROR(GENERIC); - goto _compressCleanup; - } - totalCompressedSize += size; - } -_compressCleanup: - ZSTD_freeCCtx(cctx); - ZSTD_freeCDict(cdict); - if (dst) { - free(dst); - } - return totalCompressedSize; -} - +/** + * COVER_best_t is used for two purposes: + * 1. Synchronizing threads. + * 2. Saving the best parameters and dictionary. + * + * All of the methods except COVER_best_init() are thread safe if zstd is + * compiled with multithreaded support. + */ +typedef struct COVER_best_s { + ZSTD_pthread_mutex_t mutex; + ZSTD_pthread_cond_t cond; + size_t liveJobs; + void *dict; + size_t dictSize; + ZDICT_cover_params_t parameters; + size_t compressedSize; +} COVER_best_t; /** * Initialize the `COVER_best_t`. 
*/ -void COVER_best_init(COVER_best_t *best) { +static void COVER_best_init(COVER_best_t *best) { if (best==NULL) return; /* compatible with init on NULL */ (void)ZSTD_pthread_mutex_init(&best->mutex, NULL); (void)ZSTD_pthread_cond_init(&best->cond, NULL); @@ -802,7 +739,7 @@ void COVER_best_init(COVER_best_t *best) { /** * Wait until liveJobs == 0. */ -void COVER_best_wait(COVER_best_t *best) { +static void COVER_best_wait(COVER_best_t *best) { if (!best) { return; } @@ -816,7 +753,7 @@ void COVER_best_wait(COVER_best_t *best) { /** * Call COVER_best_wait() and then destroy the COVER_best_t. */ -void COVER_best_destroy(COVER_best_t *best) { +static void COVER_best_destroy(COVER_best_t *best) { if (!best) { return; } @@ -832,7 +769,7 @@ void COVER_best_destroy(COVER_best_t *best) { * Called when a thread is about to be launched. * Increments liveJobs. */ -void COVER_best_start(COVER_best_t *best) { +static void COVER_best_start(COVER_best_t *best) { if (!best) { return; } @@ -846,7 +783,7 @@ void COVER_best_start(COVER_best_t *best) { * Decrements liveJobs and signals any waiting threads if liveJobs == 0. * If this dictionary is the best so far save it and its parameters. */ -void COVER_best_finish(COVER_best_t *best, size_t compressedSize, +static void COVER_best_finish(COVER_best_t *best, size_t compressedSize, ZDICT_cover_params_t parameters, void *dict, size_t dictSize) { if (!best) { @@ -868,8 +805,6 @@ void COVER_best_finish(COVER_best_t *best, size_t compressedSize, if (!best->dict) { best->compressedSize = ERROR(GENERIC); best->dictSize = 0; - ZSTD_pthread_cond_signal(&best->cond); - ZSTD_pthread_mutex_unlock(&best->mutex); return; } } @@ -879,10 +814,10 @@ void COVER_best_finish(COVER_best_t *best, size_t compressedSize, best->parameters = parameters; best->compressedSize = compressedSize; } + ZSTD_pthread_mutex_unlock(&best->mutex); if (liveJobs == 0) { ZSTD_pthread_cond_broadcast(&best->cond); } - ZSTD_pthread_mutex_unlock(&best->mutex); } } @@ -897,7 +832,7 @@ typedef struct COVER_tryParameters_data_s { } COVER_tryParameters_data_t; /** - * Tries a set of parameters and updates the COVER_best_t with the results. + * Tries a set of parameters and upates the COVER_best_t with the results. * This function is thread safe if zstd is compiled with multithreaded support. * It takes its parameters as an *OWNING* opaque pointer to support threading. 
*/ @@ -928,7 +863,7 @@ static void COVER_tryParameters(void *opaque) { dictBufferCapacity, parameters); dictBufferCapacity = ZDICT_finalizeDictionary( dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, - ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, + ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples, parameters.zParams); if (ZDICT_isError(dictBufferCapacity)) { DISPLAYLEVEL(1, "Failed to finalize dictionary\n"); @@ -936,10 +871,49 @@ static void COVER_tryParameters(void *opaque) { } } /* Check total compressed size */ - totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes, - ctx->samples, ctx->offsets, - ctx->nbTrainSamples, ctx->nbSamples, - dict, dictBufferCapacity); + { + /* Pointers */ + ZSTD_CCtx *cctx; + ZSTD_CDict *cdict; + void *dst; + /* Local variables */ + size_t dstCapacity; + size_t i; + /* Allocate dst with enough space to compress the maximum sized sample */ + { + size_t maxSampleSize = 0; + for (i = 0; i < ctx->nbSamples; ++i) { + maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize); + } + dstCapacity = ZSTD_compressBound(maxSampleSize); + dst = malloc(dstCapacity); + } + /* Create the cctx and cdict */ + cctx = ZSTD_createCCtx(); + cdict = ZSTD_createCDict(dict, dictBufferCapacity, + parameters.zParams.compressionLevel); + if (!dst || !cctx || !cdict) { + goto _compressCleanup; + } + /* Compress each sample and sum their sizes (or error) */ + totalCompressedSize = dictBufferCapacity; + for (i = 0; i < ctx->nbSamples; ++i) { + const size_t size = ZSTD_compress_usingCDict( + cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i], + ctx->samplesSizes[i], cdict); + if (ZSTD_isError(size)) { + totalCompressedSize = ERROR(GENERIC); + goto _compressCleanup; + } + totalCompressedSize += size; + } + _compressCleanup: + ZSTD_freeCCtx(cctx); + ZSTD_freeCDict(cdict); + if (dst) { + free(dst); + } + } _cleanup: COVER_best_finish(data->best, totalCompressedSize, parameters, dict, @@ -960,8 +934,6 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( ZDICT_cover_params_t *parameters) { /* constants */ const unsigned nbThreads = parameters->nbThreads; - const double splitPoint = - parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint; const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; const unsigned kMinK = parameters->k == 0 ? 
50 : parameters->k; @@ -979,10 +951,6 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( POOL_ctx *pool = NULL; /* Checks */ - if (splitPoint <= 0 || splitPoint > 1) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); - return ERROR(GENERIC); - } if (kMinK < kMaxD || kMaxK < kMinK) { LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); return ERROR(GENERIC); @@ -1013,7 +981,7 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( /* Initialize the context for this value of d */ COVER_ctx_t ctx; LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); - if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint)) { + if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) { LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); COVER_best_destroy(&best); POOL_free(pool); @@ -1038,7 +1006,6 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( data->parameters = *parameters; data->parameters.k = k; data->parameters.d = d; - data->parameters.splitPoint = splitPoint; data->parameters.steps = kSteps; data->parameters.zParams.notificationLevel = g_displayLevel; /* Check the parameters */ @@ -1056,7 +1023,7 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( } /* Print status */ LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", - (unsigned)((iteration * 100) / kIterations)); + (U32)((iteration * 100) / kIterations)); ++iteration; } COVER_best_wait(&best); diff --git a/vendor/github.com/DataDog/zstd/cover.h b/vendor/github.com/DataDog/zstd/cover.h deleted file mode 100644 index 82e2e1cea43..00000000000 --- a/vendor/github.com/DataDog/zstd/cover.h +++ /dev/null @@ -1,83 +0,0 @@ -#include /* fprintf */ -#include /* malloc, free, qsort */ -#include /* memset */ -#include /* clock */ -#include "mem.h" /* read */ -#include "pool.h" -#include "threading.h" -#include "zstd_internal.h" /* includes zstd.h */ -#ifndef ZDICT_STATIC_LINKING_ONLY -#define ZDICT_STATIC_LINKING_ONLY -#endif -#include "zdict.h" - -/** - * COVER_best_t is used for two purposes: - * 1. Synchronizing threads. - * 2. Saving the best parameters and dictionary. - * - * All of the methods except COVER_best_init() are thread safe if zstd is - * compiled with multithreaded support. - */ -typedef struct COVER_best_s { - ZSTD_pthread_mutex_t mutex; - ZSTD_pthread_cond_t cond; - size_t liveJobs; - void *dict; - size_t dictSize; - ZDICT_cover_params_t parameters; - size_t compressedSize; -} COVER_best_t; - -/** - * A segment is a range in the source as well as the score of the segment. - */ -typedef struct { - U32 begin; - U32 end; - U32 score; -} COVER_segment_t; - -/** - * Checks total compressed size of a dictionary - */ -size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, - const size_t *samplesSizes, const BYTE *samples, - size_t *offsets, - size_t nbTrainSamples, size_t nbSamples, - BYTE *const dict, size_t dictBufferCapacity); - -/** - * Returns the sum of the sample sizes. - */ -size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) ; - -/** - * Initialize the `COVER_best_t`. - */ -void COVER_best_init(COVER_best_t *best); - -/** - * Wait until liveJobs == 0. - */ -void COVER_best_wait(COVER_best_t *best); - -/** - * Call COVER_best_wait() and then destroy the COVER_best_t. - */ -void COVER_best_destroy(COVER_best_t *best); - -/** - * Called when a thread is about to be launched. - * Increments liveJobs. 
- */ -void COVER_best_start(COVER_best_t *best); - -/** - * Called when a thread finishes executing, both on error or success. - * Decrements liveJobs and signals any waiting threads if liveJobs == 0. - * If this dictionary is the best so far save it and its parameters. - */ -void COVER_best_finish(COVER_best_t *best, size_t compressedSize, - ZDICT_cover_params_t parameters, void *dict, - size_t dictSize); diff --git a/vendor/github.com/DataDog/zstd/cpu.h b/vendor/github.com/DataDog/zstd/cpu.h index 5f0923fc928..4eb48e39e10 100644 --- a/vendor/github.com/DataDog/zstd/cpu.h +++ b/vendor/github.com/DataDog/zstd/cpu.h @@ -36,7 +36,7 @@ MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { U32 f1d = 0; U32 f7b = 0; U32 f7c = 0; -#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) +#ifdef _MSC_VER int reg[4]; __cpuid((int*)reg, 0); { @@ -72,13 +72,14 @@ MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { "cpuid\n\t" "popl %%ebx\n\t" : "=a"(f1a), "=c"(f1c), "=d"(f1d) - : "a"(1)); + : "a"(1) + :); } if (n >= 7) { __asm__( "pushl %%ebx\n\t" "cpuid\n\t" - "movl %%ebx, %%eax\n\t" + "movl %%ebx, %%eax\n\r" "popl %%ebx" : "=a"(f7b), "=c"(f7c) : "a"(7), "c"(0) diff --git a/vendor/github.com/DataDog/zstd/debug.c b/vendor/github.com/DataDog/zstd/debug.c deleted file mode 100644 index 3ebdd1cb15a..00000000000 --- a/vendor/github.com/DataDog/zstd/debug.c +++ /dev/null @@ -1,44 +0,0 @@ -/* ****************************************************************** - debug - Part of FSE library - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -****************************************************************** */ - - -/* - * This module only hosts one global variable - * which can be used to dynamically influence the verbosity of traces, - * such as DEBUGLOG and RAWLOG - */ - -#include "debug.h" - -int g_debuglevel = DEBUGLEVEL; diff --git a/vendor/github.com/DataDog/zstd/debug.h b/vendor/github.com/DataDog/zstd/debug.h deleted file mode 100644 index b4fc89d4974..00000000000 --- a/vendor/github.com/DataDog/zstd/debug.h +++ /dev/null @@ -1,134 +0,0 @@ -/* ****************************************************************** - debug - Part of FSE library - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -****************************************************************** */ - - -/* - * The purpose of this header is to enable debug functions. - * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time, - * and DEBUG_STATIC_ASSERT() for compile-time. - * - * By default, DEBUGLEVEL==0, which means run-time debug is disabled. - * - * Level 1 enables assert() only. - * Starting level 2, traces can be generated and pushed to stderr. - * The higher the level, the more verbose the traces. - * - * It's possible to dynamically adjust level using variable g_debug_level, - * which is only declared if DEBUGLEVEL>=2, - * and is a global variable, not multi-thread protected (use with care) - */ - -#ifndef DEBUG_H_12987983217 -#define DEBUG_H_12987983217 - -#if defined (__cplusplus) -extern "C" { -#endif - - -/* static assert is triggered at compile time, leaving no runtime artefact. - * static assert only works with compile-time constants. - * Also, this variant can only be used inside a function. */ -#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1]) - - -/* DEBUGLEVEL is expected to be defined externally, - * typically through compiler command line. - * Value must be a number. 
*/ -#ifndef DEBUGLEVEL -# define DEBUGLEVEL 0 -#endif - - -/* DEBUGFILE can be defined externally, - * typically through compiler command line. - * note : currently useless. - * Value must be stderr or stdout */ -#ifndef DEBUGFILE -# define DEBUGFILE stderr -#endif - - -/* recommended values for DEBUGLEVEL : - * 0 : release mode, no debug, all run-time checks disabled - * 1 : enables assert() only, no display - * 2 : reserved, for currently active debug path - * 3 : events once per object lifetime (CCtx, CDict, etc.) - * 4 : events once per frame - * 5 : events once per block - * 6 : events once per sequence (verbose) - * 7+: events at every position (*very* verbose) - * - * It's generally inconvenient to output traces > 5. - * In which case, it's possible to selectively trigger high verbosity levels - * by modifying g_debug_level. - */ - -#if (DEBUGLEVEL>=1) -# include -#else -# ifndef assert /* assert may be already defined, due to prior #include */ -# define assert(condition) ((void)0) /* disable assert (default) */ -# endif -#endif - -#if (DEBUGLEVEL>=2) -# include -extern int g_debuglevel; /* the variable is only declared, - it actually lives in debug.c, - and is shared by the whole process. - It's not thread-safe. - It's useful when enabling very verbose levels - on selective conditions (such as position in src) */ - -# define RAWLOG(l, ...) { \ - if (l<=g_debuglevel) { \ - fprintf(stderr, __VA_ARGS__); \ - } } -# define DEBUGLOG(l, ...) { \ - if (l<=g_debuglevel) { \ - fprintf(stderr, __FILE__ ": " __VA_ARGS__); \ - fprintf(stderr, " \n"); \ - } } -#else -# define RAWLOG(l, ...) {} /* disabled */ -# define DEBUGLOG(l, ...) {} /* disabled */ -#endif - - -#if defined (__cplusplus) -} -#endif - -#endif /* DEBUG_H_12987983217 */ diff --git a/vendor/github.com/DataDog/zstd/divsufsort.c b/vendor/github.com/DataDog/zstd/divsufsort.c index ead9220442b..60cceb08832 100644 --- a/vendor/github.com/DataDog/zstd/divsufsort.c +++ b/vendor/github.com/DataDog/zstd/divsufsort.c @@ -1637,7 +1637,7 @@ construct_SA(const unsigned char *T, int *SA, if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } - assert(k < j); assert(k != NULL); + assert(k < j); *k-- = s; } else { assert(((s == 0) && (T[s] == c1)) || (s < 0)); @@ -1701,7 +1701,7 @@ construct_BWT(const unsigned char *T, int *SA, if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } - assert(k < j); assert(k != NULL); + assert(k < j); *k-- = s; } else if(s != 0) { *j = ~s; @@ -1785,7 +1785,7 @@ construct_BWT_indexes(const unsigned char *T, int *SA, if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } - assert(k < j); assert(k != NULL); + assert(k < j); *k-- = s; } else if(s != 0) { *j = ~s; diff --git a/vendor/github.com/DataDog/zstd/entropy_common.c b/vendor/github.com/DataDog/zstd/entropy_common.c index b12944e1de9..b37a082fee2 100644 --- a/vendor/github.com/DataDog/zstd/entropy_common.c +++ b/vendor/github.com/DataDog/zstd/entropy_common.c @@ -72,21 +72,7 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t unsigned charnum = 0; int previous0 = 0; - if (hbSize < 4) { - /* This function only works when hbSize >= 4 */ - char buffer[4]; - memset(buffer, 0, sizeof(buffer)); - memcpy(buffer, headerBuffer, hbSize); - { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr, - buffer, sizeof(buffer)); - if (FSE_isError(countSize)) return countSize; - if (countSize 
> hbSize) return ERROR(corruption_detected); - return countSize; - } } - assert(hbSize >= 4); - - /* init */ - memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */ + if (hbSize < 4) return ERROR(srcSize_wrong); bitStream = MEM_readLE32(ip); nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); @@ -119,7 +105,6 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); while (charnum < n0) normalizedCounter[charnum++] = 0; if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { - assert((bitCount >> 3) <= 3); /* For first condition to work */ ip += bitCount>>3; bitCount &= 7; bitStream = MEM_readLE32(ip) >> bitCount; diff --git a/vendor/github.com/DataDog/zstd/error_private.c b/vendor/github.com/DataDog/zstd/error_private.c index 7c1bb67a23f..d004ee636c6 100644 --- a/vendor/github.com/DataDog/zstd/error_private.c +++ b/vendor/github.com/DataDog/zstd/error_private.c @@ -14,10 +14,6 @@ const char* ERR_getErrorString(ERR_enum code) { -#ifdef ZSTD_STRIP_ERROR_STRINGS - (void)code; - return "Error strings stripped"; -#else static const char* const notErrorCode = "Unspecified error code"; switch( code ) { @@ -43,12 +39,10 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; case PREFIX(srcSize_wrong): return "Src size is incorrect"; - case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; /* following error codes are not stable and may be removed or changed in a future version */ case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; case PREFIX(maxCode): default: return notErrorCode; } -#endif } diff --git a/vendor/github.com/DataDog/zstd/fastcover.c b/vendor/github.com/DataDog/zstd/fastcover.c deleted file mode 100644 index c289c069014..00000000000 --- a/vendor/github.com/DataDog/zstd/fastcover.c +++ /dev/null @@ -1,728 +0,0 @@ -/*-************************************* -* Dependencies -***************************************/ -#include /* fprintf */ -#include /* malloc, free, qsort */ -#include /* memset */ -#include /* clock */ - -#include "mem.h" /* read */ -#include "pool.h" -#include "threading.h" -#include "cover.h" -#include "zstd_internal.h" /* includes zstd.h */ -#ifndef ZDICT_STATIC_LINKING_ONLY -#define ZDICT_STATIC_LINKING_ONLY -#endif -#include "zdict.h" - - -/*-************************************* -* Constants -***************************************/ -#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) -#define FASTCOVER_MAX_F 31 -#define FASTCOVER_MAX_ACCEL 10 -#define DEFAULT_SPLITPOINT 0.75 -#define DEFAULT_F 20 -#define DEFAULT_ACCEL 1 - - -/*-************************************* -* Console display -***************************************/ -static int g_displayLevel = 2; -#define DISPLAY(...) \ - { \ - fprintf(stderr, __VA_ARGS__); \ - fflush(stderr); \ - } -#define LOCALDISPLAYLEVEL(displayLevel, l, ...) 
\ - if (displayLevel >= l) { \ - DISPLAY(__VA_ARGS__); \ - } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ -#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) - -#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ - if (displayLevel >= l) { \ - if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ - g_time = clock(); \ - DISPLAY(__VA_ARGS__); \ - } \ - } -#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) -static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; -static clock_t g_time = 0; - - -/*-************************************* -* Hash Functions -***************************************/ -static const U64 prime6bytes = 227718039650203ULL; -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } -static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } - -static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } -static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } - - -/** - * Hash the d-byte value pointed to by p and mod 2^f - */ -static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) { - if (d == 6) { - return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1); - } - return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1); -} - - -/*-************************************* -* Acceleration -***************************************/ -typedef struct { - unsigned finalize; /* Percentage of training samples used for ZDICT_finalizeDictionary */ - unsigned skip; /* Number of dmer skipped between each dmer counted in computeFrequency */ -} FASTCOVER_accel_t; - - -static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = { - { 100, 0 }, /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */ - { 100, 0 }, /* accel = 1 */ - { 50, 1 }, /* accel = 2 */ - { 34, 2 }, /* accel = 3 */ - { 25, 3 }, /* accel = 4 */ - { 20, 4 }, /* accel = 5 */ - { 17, 5 }, /* accel = 6 */ - { 14, 6 }, /* accel = 7 */ - { 13, 7 }, /* accel = 8 */ - { 11, 8 }, /* accel = 9 */ - { 10, 9 }, /* accel = 10 */ -}; - - -/*-************************************* -* Context -***************************************/ -typedef struct { - const BYTE *samples; - size_t *offsets; - const size_t *samplesSizes; - size_t nbSamples; - size_t nbTrainSamples; - size_t nbTestSamples; - size_t nbDmers; - U32 *freqs; - unsigned d; - unsigned f; - FASTCOVER_accel_t accelParams; -} FASTCOVER_ctx_t; - - -/*-************************************* -* Helper functions -***************************************/ -/** - * Selects the best segment in an epoch. - * Segments of are scored according to the function: - * - * Let F(d) be the frequency of all dmers with hash value d. - * Let S_i be hash value of the dmer at position i of segment S which has length k. - * - * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) - * - * Once the dmer with hash value d is in the dictionay we set F(d) = 0. 
- */ -static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx, - U32 *freqs, U32 begin, U32 end, - ZDICT_cover_params_t parameters, - U16* segmentFreqs) { - /* Constants */ - const U32 k = parameters.k; - const U32 d = parameters.d; - const U32 f = ctx->f; - const U32 dmersInK = k - d + 1; - - /* Try each segment (activeSegment) and save the best (bestSegment) */ - COVER_segment_t bestSegment = {0, 0, 0}; - COVER_segment_t activeSegment; - - /* Reset the activeDmers in the segment */ - /* The activeSegment starts at the beginning of the epoch. */ - activeSegment.begin = begin; - activeSegment.end = begin; - activeSegment.score = 0; - - /* Slide the activeSegment through the whole epoch. - * Save the best segment in bestSegment. - */ - while (activeSegment.end < end) { - /* Get hash value of current dmer */ - const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d); - - /* Add frequency of this index to score if this is the first occurence of index in active segment */ - if (segmentFreqs[idx] == 0) { - activeSegment.score += freqs[idx]; - } - /* Increment end of segment and segmentFreqs*/ - activeSegment.end += 1; - segmentFreqs[idx] += 1; - /* If the window is now too large, drop the first position */ - if (activeSegment.end - activeSegment.begin == dmersInK + 1) { - /* Get hash value of the dmer to be eliminated from active segment */ - const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); - segmentFreqs[delIndex] -= 1; - /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */ - if (segmentFreqs[delIndex] == 0) { - activeSegment.score -= freqs[delIndex]; - } - /* Increment start of segment */ - activeSegment.begin += 1; - } - - /* If this segment is the best so far save it */ - if (activeSegment.score > bestSegment.score) { - bestSegment = activeSegment; - } - } - - /* Zero out rest of segmentFreqs array */ - while (activeSegment.begin < end) { - const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); - segmentFreqs[delIndex] -= 1; - activeSegment.begin += 1; - } - - { - /* Zero the frequency of hash value of each dmer covered by the chosen segment. */ - U32 pos; - for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { - const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d); - freqs[i] = 0; - } - } - - return bestSegment; -} - - -static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters, - size_t maxDictSize, unsigned f, - unsigned accel) { - /* k, d, and f are required parameters */ - if (parameters.d == 0 || parameters.k == 0) { - return 0; - } - /* d has to be 6 or 8 */ - if (parameters.d != 6 && parameters.d != 8) { - return 0; - } - /* k <= maxDictSize */ - if (parameters.k > maxDictSize) { - return 0; - } - /* d <= k */ - if (parameters.d > parameters.k) { - return 0; - } - /* 0 < f <= FASTCOVER_MAX_F*/ - if (f > FASTCOVER_MAX_F || f == 0) { - return 0; - } - /* 0 < splitPoint <= 1 */ - if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) { - return 0; - } - /* 0 < accel <= 10 */ - if (accel > 10 || accel == 0) { - return 0; - } - return 1; -} - - -/** - * Clean up a context initialized with `FASTCOVER_ctx_init()`. 
- */
-static void
-FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
-{
-    if (!ctx) return;
-
-    free(ctx->freqs);
-    ctx->freqs = NULL;
-
-    free(ctx->offsets);
-    ctx->offsets = NULL;
-}
-
-
-/**
- * Calculate the frequency of the hash value of each dmer in ctx->samples
- */
-static void
-FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx)
-{
-    const unsigned f = ctx->f;
-    const unsigned d = ctx->d;
-    const unsigned skip = ctx->accelParams.skip;
-    const unsigned readLength = MAX(d, 8);
-    size_t i;
-    assert(ctx->nbTrainSamples >= 5);
-    assert(ctx->nbTrainSamples <= ctx->nbSamples);
-    for (i = 0; i < ctx->nbTrainSamples; i++) {
-        size_t start = ctx->offsets[i];     /* start of current dmer */
-        size_t const currSampleEnd = ctx->offsets[i+1];
-        while (start + readLength <= currSampleEnd) {
-            const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);
-            freqs[dmerIndex]++;
-            start = start + skip + 1;
-        }
-    }
-}
-
-
-/**
- * Prepare a context for dictionary building.
- * The context is only dependent on the parameter `d` and can be used multiple
- * times.
- * Returns 1 on success or zero on error.
- * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
- */
-static int
-FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
-                   const void* samplesBuffer,
-                   const size_t* samplesSizes, unsigned nbSamples,
-                   unsigned d, double splitPoint, unsigned f,
-                   FASTCOVER_accel_t accelParams)
-{
-    const BYTE* const samples = (const BYTE*)samplesBuffer;
-    const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
-    /* Split samples into testing and training sets */
-    const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
-    const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
-    const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
-    const size_t testSamplesSize = splitPoint < 1.0 ?
-        COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
-
-    /* Checks */
-    if (totalSamplesSize < MAX(d, sizeof(U64)) ||
-        totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
-        DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
-                     (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
-        return 0;
-    }
-
-    /* Check if there are at least 5 training samples */
-    if (nbTrainSamples < 5) {
-        DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples);
-        return 0;
-    }
-
-    /* Check if there's a testing sample */
-    if (nbTestSamples < 1) {
-        DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples);
-        return 0;
-    }
-
-    /* Zero the context */
-    memset(ctx, 0, sizeof(*ctx));
-    DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
-                 (unsigned)trainingSamplesSize);
-    DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
-                 (unsigned)testSamplesSize);
-
-    ctx->samples = samples;
-    ctx->samplesSizes = samplesSizes;
-    ctx->nbSamples = nbSamples;
-    ctx->nbTrainSamples = nbTrainSamples;
-    ctx->nbTestSamples = nbTestSamples;
-    ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
-    ctx->d = d;
-    ctx->f = f;
-    ctx->accelParams = accelParams;
-
-    /* The offsets of each file */
-    ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t));
-    if (ctx->offsets == NULL) {
-        DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n");
-        FASTCOVER_ctx_destroy(ctx);
-        return 0;
-    }
-
-    /* Fill offsets from the samplesSizes */
-    {   U32 i;
-        ctx->offsets[0] = 0;
-        assert(nbSamples >= 5);
-        for (i = 1; i <= nbSamples; ++i) {
-            ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
-        }
-    }
-
-    /* Initialize frequency array of size 2^f */
-    ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32));
-    if (ctx->freqs == NULL) {
-        DISPLAYLEVEL(1, "Failed to allocate frequency table \n");
-        FASTCOVER_ctx_destroy(ctx);
-        return 0;
-    }
-
-    DISPLAYLEVEL(2, "Computing frequencies\n");
-    FASTCOVER_computeFrequency(ctx->freqs, ctx);
-
-    return 1;
-}
-
-
-/**
- * Given the prepared context, build the dictionary.
- */
-static size_t
-FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
-                          U32* freqs,
-                          void* dictBuffer, size_t dictBufferCapacity,
-                          ZDICT_cover_params_t parameters,
-                          U16* segmentFreqs)
-{
-    BYTE *const dict = (BYTE *)dictBuffer;
-    size_t tail = dictBufferCapacity;
-    /* Divide the data up into epochs of equal size.
-     * We will select at least one segment from each epoch.
-     */
-    const unsigned epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k));
-    const unsigned epochSize = (U32)(ctx->nbDmers / epochs);
-    size_t epoch;
-    DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
-                 epochs, epochSize);
-    /* Loop through the epochs until there are no more segments or the dictionary
-     * is full.
-     */
-    for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
-        const U32 epochBegin = (U32)(epoch * epochSize);
-        const U32 epochEnd = epochBegin + epochSize;
-        size_t segmentSize;
-        /* Select a segment */
-        COVER_segment_t segment = FASTCOVER_selectSegment(
-            ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);
-
-        /* If the segment covers no dmers, then we are out of content */
-        if (segment.score == 0) {
-            break;
-        }
-
-        /* Trim the segment if necessary and if it is too small then we are done */
-        segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
-        if (segmentSize < parameters.d) {
-            break;
-        }
-
-        /* We fill the dictionary from the back to allow the best segments to be
-         * referenced with the smallest offsets.
-         */
-        tail -= segmentSize;
-        memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
-        DISPLAYUPDATE(
-            2, "\r%u%% ",
-            (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
-    }
-    DISPLAYLEVEL(2, "\r%79s\r", "");
-    return tail;
-}
-
-
-/**
- * Parameters for FASTCOVER_tryParameters().
- */
-typedef struct FASTCOVER_tryParameters_data_s {
-    const FASTCOVER_ctx_t* ctx;
-    COVER_best_t* best;
-    size_t dictBufferCapacity;
-    ZDICT_cover_params_t parameters;
-} FASTCOVER_tryParameters_data_t;
-
-
-/**
- * Tries a set of parameters and updates the COVER_best_t with the results.
- * This function is thread safe if zstd is compiled with multithreaded support.
- * It takes its parameters as an *OWNING* opaque pointer to support threading.
- */
-static void FASTCOVER_tryParameters(void *opaque)
-{
-    /* Save parameters as local variables */
-    FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque;
-    const FASTCOVER_ctx_t *const ctx = data->ctx;
-    const ZDICT_cover_params_t parameters = data->parameters;
-    size_t dictBufferCapacity = data->dictBufferCapacity;
-    size_t totalCompressedSize = ERROR(GENERIC);
-    /* Initialize array to keep track of frequency of dmer within activeSegment */
-    U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16));
-    /* Allocate space for hash table, dict, and freqs */
-    BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
-    U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));
-    if (!segmentFreqs || !dict || !freqs) {
-        DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
-        goto _cleanup;
-    }
-    /* Copy the frequencies because we need to modify them */
-    memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32));
-    /* Build the dictionary */
-    {   const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity,
-                                                      parameters, segmentFreqs);
-        const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
-        dictBufferCapacity = ZDICT_finalizeDictionary(
-            dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
-            ctx->samples, ctx->samplesSizes, nbFinalizeSamples, parameters.zParams);
-        if (ZDICT_isError(dictBufferCapacity)) {
-            DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
-            goto _cleanup;
-        }
-    }
-    /* Check total compressed size */
-    totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes,
-                                                         ctx->samples, ctx->offsets,
-                                                         ctx->nbTrainSamples, ctx->nbSamples,
-                                                         dict, dictBufferCapacity);
-_cleanup:
-    COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
-                      dictBufferCapacity);
-    free(data);
-    free(segmentFreqs);
-    free(dict);
-    free(freqs);
-}
-
-
-static void
-FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams,
-                               ZDICT_cover_params_t* coverParams)
-{
-    coverParams->k = fastCoverParams.k;
-    coverParams->d = fastCoverParams.d;
-    coverParams->steps = fastCoverParams.steps;
-    coverParams->nbThreads = fastCoverParams.nbThreads;
-    coverParams->splitPoint = fastCoverParams.splitPoint;
-    coverParams->zParams = fastCoverParams.zParams;
-}
-
-
-static void
-FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams,
-                                   ZDICT_fastCover_params_t* fastCoverParams,
-                                   unsigned f, unsigned accel)
-{
-    fastCoverParams->k = coverParams.k;
-    fastCoverParams->d = coverParams.d;
-    fastCoverParams->steps = coverParams.steps;
-    fastCoverParams->nbThreads = coverParams.nbThreads;
-    fastCoverParams->splitPoint = coverParams.splitPoint;
-    fastCoverParams->f = f;
-    fastCoverParams->accel = accel;
-    fastCoverParams->zParams = coverParams.zParams;
-}
-
-
-ZDICTLIB_API size_t
-ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,
-                                const void* samplesBuffer,
-                                const size_t* samplesSizes, unsigned nbSamples,
-                                ZDICT_fastCover_params_t parameters)
-{
-    BYTE* const dict = (BYTE*)dictBuffer;
-    FASTCOVER_ctx_t ctx;
-    ZDICT_cover_params_t coverParams;
-    FASTCOVER_accel_t accelParams;
-    /* Initialize global data */
-    g_displayLevel = parameters.zParams.notificationLevel;
-    /* Assign splitPoint and f if not provided */
-    parameters.splitPoint = 1.0;
-    parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f;
-    parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel;
-    /* Convert to cover parameter */
-    memset(&coverParams, 0 , sizeof(coverParams));
-    FASTCOVER_convertToCoverParams(parameters, &coverParams);
-    /* Checks */
-    if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f,
-                                   parameters.accel)) {
-        DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
-        return ERROR(GENERIC);
-    }
-    if (nbSamples == 0) {
-        DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n");
-        return ERROR(GENERIC);
-    }
-    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
-        DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
-                     ZDICT_DICTSIZE_MIN);
-        return ERROR(dstSize_tooSmall);
-    }
-    /* Assign corresponding FASTCOVER_accel_t to accelParams*/
-    accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
-    /* Initialize context */
-    if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
-                            coverParams.d, parameters.splitPoint, parameters.f,
-                            accelParams)) {
-        DISPLAYLEVEL(1, "Failed to initialize context\n");
-        return ERROR(GENERIC);
-    }
-    /* Build the dictionary */
-    DISPLAYLEVEL(2, "Building dictionary\n");
-    {
-        /* Initialize array to keep track of frequency of dmer within activeSegment */
-        U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16));
-        const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer,
-                                                      dictBufferCapacity, coverParams, segmentFreqs);
-        const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
-        const size_t dictionarySize = ZDICT_finalizeDictionary(
-            dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
-            samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
-        if (!ZSTD_isError(dictionarySize)) {
-            DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
-                         (unsigned)dictionarySize);
-        }
-        FASTCOVER_ctx_destroy(&ctx);
-        free(segmentFreqs);
-        return dictionarySize;
-    }
-}
-
-
-ZDICTLIB_API size_t
-ZDICT_optimizeTrainFromBuffer_fastCover(
-                    void* dictBuffer, size_t dictBufferCapacity,
-                    const void* samplesBuffer,
-                    const size_t* samplesSizes, unsigned nbSamples,
-                    ZDICT_fastCover_params_t* parameters)
-{
-    ZDICT_cover_params_t coverParams;
-    FASTCOVER_accel_t accelParams;
-    /* constants */
-    const unsigned nbThreads = parameters->nbThreads;
-    const double splitPoint =
-        parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
-    const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
-    const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
-    const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
-    const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
-    const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
-    const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
-    const unsigned kIterations =
-        (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
-    const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f;
-    const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel;
-    /* Local variables */
-    const int displayLevel = parameters->zParams.notificationLevel;
-    unsigned iteration = 1;
-    unsigned d;
-    unsigned k;
-    COVER_best_t best;
-    POOL_ctx *pool = NULL;
-    /* Checks */
-    if (splitPoint <= 0 || splitPoint > 1) {
-        LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n");
-        return ERROR(GENERIC);
-    }
-    if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) {
-        LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n");
-        return ERROR(GENERIC);
-    }
-    if (kMinK < kMaxD || kMaxK < kMinK) {
-        LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n");
-        return ERROR(GENERIC);
-    }
-    if (nbSamples == 0) {
-        LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n");
-        return ERROR(GENERIC);
-    }
-    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
-        LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n",
-                          ZDICT_DICTSIZE_MIN);
-        return ERROR(dstSize_tooSmall);
-    }
-    if (nbThreads > 1) {
-        pool = POOL_create(nbThreads, 1);
-        if (!pool) {
-            return ERROR(memory_allocation);
-        }
-    }
-    /* Initialization */
-    COVER_best_init(&best);
-    memset(&coverParams, 0 , sizeof(coverParams));
-    FASTCOVER_convertToCoverParams(*parameters, &coverParams);
-    accelParams = FASTCOVER_defaultAccelParameters[accel];
-    /* Turn down global display level to clean up display at level 2 and below */
-    g_displayLevel = displayLevel == 0 ?
0 : displayLevel - 1; - /* Loop through d first because each new value needs a new context */ - LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", - kIterations); - for (d = kMinD; d <= kMaxD; d += 2) { - /* Initialize the context for this value of d */ - FASTCOVER_ctx_t ctx; - LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); - if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams)) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); - COVER_best_destroy(&best); - POOL_free(pool); - return ERROR(GENERIC); - } - /* Loop through k reusing the same context */ - for (k = kMinK; k <= kMaxK; k += kStepSize) { - /* Prepare the arguments */ - FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc( - sizeof(FASTCOVER_tryParameters_data_t)); - LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); - if (!data) { - LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); - COVER_best_destroy(&best); - FASTCOVER_ctx_destroy(&ctx); - POOL_free(pool); - return ERROR(GENERIC); - } - data->ctx = &ctx; - data->best = &best; - data->dictBufferCapacity = dictBufferCapacity; - data->parameters = coverParams; - data->parameters.k = k; - data->parameters.d = d; - data->parameters.splitPoint = splitPoint; - data->parameters.steps = kSteps; - data->parameters.zParams.notificationLevel = g_displayLevel; - /* Check the parameters */ - if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, - data->ctx->f, accel)) { - DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); - free(data); - continue; - } - /* Call the function and pass ownership of data to it */ - COVER_best_start(&best); - if (pool) { - POOL_add(pool, &FASTCOVER_tryParameters, data); - } else { - FASTCOVER_tryParameters(data); - } - /* Print status */ - LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", - (unsigned)((iteration * 100) / kIterations)); - ++iteration; - } - COVER_best_wait(&best); - FASTCOVER_ctx_destroy(&ctx); - } - LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); - /* Fill the output buffer and parameters with output of the best parameters */ - { - const size_t dictSize = best.dictSize; - if (ZSTD_isError(best.compressedSize)) { - const size_t compressedSize = best.compressedSize; - COVER_best_destroy(&best); - POOL_free(pool); - return compressedSize; - } - FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel); - memcpy(dictBuffer, best.dict, dictSize); - COVER_best_destroy(&best); - POOL_free(pool); - return dictSize; - } - -} diff --git a/vendor/github.com/DataDog/zstd/fse.h b/vendor/github.com/DataDog/zstd/fse.h index f72c519b259..6a1d272be5c 100644 --- a/vendor/github.com/DataDog/zstd/fse.h +++ b/vendor/github.com/DataDog/zstd/fse.h @@ -72,7 +72,6 @@ extern "C" { #define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ - /*-**************************************** * FSE simple functions ******************************************/ @@ -130,7 +129,7 @@ FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, ******************************************/ /*! FSE_compress() does the following: -1. count symbol occurrence from source[] into table count[] (see hist.h) +1. count symbol occurrence from source[] into table count[] 2. 
normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) 3. save normalized counters to memory buffer using writeNCount() 4. build encoding table 'CTable' from normalized counters @@ -148,6 +147,15 @@ or to save and provide normalized distribution using external method. /* *** COMPRESSION *** */ +/*! FSE_count(): + Provides the precise count of each byte within a table 'count'. + 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). + *maxSymbolValuePtr will be updated if detected smaller than initial value. + @return : the count of the most frequent symbol (which is not identified). + if return == srcSize, there is only one symbol. + Can also return an error code, which can be tested with FSE_isError(). */ +FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + /*! FSE_optimalTableLog(): dynamically downsize 'tableLog' when conditions are met. It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. @@ -159,8 +167,7 @@ FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). @return : tableLog, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, - const unsigned* count, size_t srcSize, unsigned maxSymbolValue); +FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue); /*! FSE_NCountWriteBound(): Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. @@ -171,9 +178,8 @@ FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tab Compactly save 'normalizedCounter' into 'buffer'. @return : size of the compressed table, or an errorCode, which can be tested using FSE_isError(). */ -FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, - const short* normalizedCounter, - unsigned maxSymbolValue, unsigned tableLog); +FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + /*! Constructor and Destructor of FSE_CTable. Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ @@ -244,9 +250,7 @@ If there is an error, the function will return an ErrorCode (which can be tested @return : size read from 'rBuffer', or an errorCode, which can be tested using FSE_isError(). maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ -FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, - unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, - const void* rBuffer, size_t rBuffSize); +FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); /*! Constructor and Destructor of FSE_DTable. Note that its size depends on 'tableLog' */ @@ -321,8 +325,33 @@ If there is an error, the function will return an error code, which can be teste /* ***************************************** - * FSE advanced API - ***************************************** */ +* FSE advanced API +*******************************************/ +/* FSE_count_wksp() : + * Same as FSE_count(), but using an externally provided scratch buffer. 
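This hunk reverts fse.h to the pre-`hist.h` API, where `FSE_count()` feeds directly into `FSE_normalizeCount()` and `FSE_writeNCount()`. As a rough illustration of how the restored declarations compose (a sketch only, built from the signatures shown in this header; error handling is abbreviated and the helper name `fse_build_ncount` is invented for the example):

```c
#include "fse.h"   /* FSE_count, FSE_optimalTableLog, FSE_normalizeCount, FSE_writeNCount */

/* Sketch: steps 1-3 of the FSE_compress() outline above, using the
 * declarations restored in this hunk. */
static size_t fse_build_ncount(void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    unsigned count[256];
    short    norm[256];
    unsigned maxSymbolValue = 255;

    /* 1. histogram the source bytes */
    size_t const maxCount = FSE_count(count, &maxSymbolValue, src, srcSize);
    if (FSE_isError(maxCount)) return maxCount;

    /* 2. normalize counters so that they sum to a power of two */
    {   unsigned const tableLog = FSE_optimalTableLog(12 /* == FSE_MAX_TABLELOG */, srcSize, maxSymbolValue);
        size_t const err = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
        if (FSE_isError(err)) return err;

        /* 3. save the normalized counters in their compact serialized form */
        return FSE_writeNCount(dst, dstCapacity, norm, maxSymbolValue, tableLog);
    }
}
```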
+ * `workSpace` size must be table of >= `1024` unsigned + */ +size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace); + +/** FSE_countFast() : + * same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr + */ +size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + +/* FSE_countFast_wksp() : + * Same as FSE_countFast(), but using an externally provided scratch buffer. + * `workSpace` must be a table of minimum `1024` unsigned + */ +size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace); + +/*! FSE_count_simple() : + * Same as FSE_countFast(), but does not use any additional memory (not even on stack). + * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`). +*/ +size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + + unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); /**< same as FSE_optimalTableLog(), which used `minus==2` */ @@ -512,7 +541,7 @@ MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct) const U32 tableLog = MEM_read16(ptr); statePtr->value = (ptrdiff_t)1<stateTable = u16ptr+2; - statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1); + statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1)); statePtr->stateLog = tableLog; } @@ -531,7 +560,7 @@ MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U3 } } -MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol) +MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol) { FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; const U16* const stateTable = (const U16*)(statePtr->stateTable); @@ -547,39 +576,6 @@ MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePt } -/* FSE_getMaxNbBits() : - * Approximate maximum cost of a symbol, in bits. 
- * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) - * note 1 : assume symbolValue is valid (<= maxSymbolValue) - * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ -MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue) -{ - const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr; - return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16; -} - -/* FSE_bitCost() : - * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) - * note 1 : assume symbolValue is valid (<= maxSymbolValue) - * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ -MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog) -{ - const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr; - U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16; - U32 const threshold = (minNbBits+1) << 16; - assert(tableLog < 16); - assert(accuracyLog < 31-tableLog); /* ensure enough room for renormalization double shift */ - { U32 const tableSize = 1 << tableLog; - U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); - U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog; /* linear interpolation (very approximate) */ - U32 const bitMultiplier = 1 << accuracyLog; - assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold); - assert(normalizedDeltaFromThreshold <= bitMultiplier); - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold; - } -} - - /* ====== Decompression ====== */ typedef struct { diff --git a/vendor/github.com/DataDog/zstd/fse_compress.c b/vendor/github.com/DataDog/zstd/fse_compress.c index 60f357bbd24..cb8f1fa3233 100644 --- a/vendor/github.com/DataDog/zstd/fse_compress.c +++ b/vendor/github.com/DataDog/zstd/fse_compress.c @@ -1,6 +1,6 @@ /* ****************************************************************** FSE : Finite State Entropy encoder - Copyright (C) 2013-present, Yann Collet. + Copyright (C) 2013-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) @@ -37,11 +37,9 @@ ****************************************************************/ #include /* malloc, free, qsort */ #include /* memcpy, memset */ -#include "compiler.h" -#include "mem.h" /* U32, U16, etc. 
*/ -#include "debug.h" /* assert, DEBUGLOG */ -#include "hist.h" /* HIST_count_wksp */ +#include /* printf (debug) */ #include "bitstream.h" +#include "compiler.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" #include "error_private.h" @@ -51,6 +49,7 @@ * Error Management ****************************************************************/ #define FSE_isError ERR_isError +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* ************************************************************** @@ -83,9 +82,7 @@ * wkspSize should be sized to handle worst case situation, which is `1< wkspSize) return ERROR(tableLog_tooLarge); tableU16[-2] = (U16) tableLog; tableU16[-1] = (U16) maxSymbolValue; - assert(tableLog < 16); /* required for threshold strategy to work */ /* For explanations on how to distribute symbol values over the table : - * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ - - #ifdef __clang_analyzer__ - memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */ - #endif + * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ /* symbol start positions */ { U32 u; cumul[0] = 0; - for (u=1; u <= maxSymbolValue+1; u++) { + for (u=1; u<=maxSymbolValue+1; u++) { if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ cumul[u] = cumul[u-1] + 1; tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1); @@ -130,15 +122,13 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, U32 symbol; for (symbol=0; symbol<=maxSymbolValue; symbol++) { int nbOccurences; - int const freq = normalizedCounter[symbol]; - for (nbOccurences=0; nbOccurences highThreshold) - position = (position + step) & tableMask; /* Low proba area */ + while (position > highThreshold) position = (position + step) & tableMask; /* Low proba area */ } } - assert(position==0); /* Must have initialized all positions */ + if (position!=0) return ERROR(GENERIC); /* Must have gone through all positions */ } /* Build table */ @@ -153,10 +143,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, for (s=0; s<=maxSymbolValue; s++) { switch (normalizedCounter[s]) { - case 0: - /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */ - symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<1)) { /* stops at 1 */ - if (previousIs0) { - unsigned start = symbol; - while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++; - if (symbol == alphabetSize) break; /* incorrect distribution */ - while (symbol >= start+24) { + while (remaining>1) { /* stops at 1 */ + if (previous0) { + unsigned start = charnum; + while (!normalizedCounter[charnum]) charnum++; + while (charnum >= start+24) { start+=24; bitStream += 0xFFFFU << bitCount; - if ((!writeIsSafe) && (out > oend-2)) - return ERROR(dstSize_tooSmall); /* Buffer overflow */ + if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE) bitStream; out[1] = (BYTE)(bitStream>>8); out+=2; bitStream>>=16; } - while (symbol >= start+3) { + while (charnum >= start+3) { start+=3; bitStream += 3 << bitCount; bitCount += 2; } - bitStream += (symbol-start) << bitCount; + bitStream += (charnum-start) << bitCount; bitCount += 2; if (bitCount>16) { - if ((!writeIsSafe) && (out > oend - 2)) - return ERROR(dstSize_tooSmall); /* Buffer overflow */ + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; 
out[1] = (BYTE)(bitStream>>8); out += 2; bitStream >>= 16; bitCount -= 16; } } - { int count = normalizedCounter[symbol++]; - int const max = (2*threshold-1) - remaining; + { int count = normalizedCounter[charnum++]; + int const max = (2*threshold-1)-remaining; remaining -= count < 0 ? -count : count; count++; /* +1 for extra accuracy */ - if (count>=threshold) - count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */ + if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */ bitStream += count << bitCount; bitCount += nbBits; bitCount -= (count>=1; } } if (bitCount>16) { - if ((!writeIsSafe) && (out > oend - 2)) - return ERROR(dstSize_tooSmall); /* Buffer overflow */ + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; out[1] = (BYTE)(bitStream>>8); out += 2; @@ -290,23 +259,19 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize, bitCount -= 16; } } - if (remaining != 1) - return ERROR(GENERIC); /* incorrect normalized distribution */ - assert(symbol <= alphabetSize); - /* flush remaining bitStream */ - if ((!writeIsSafe) && (out > oend - 2)) - return ERROR(dstSize_tooSmall); /* Buffer overflow */ + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ out[0] = (BYTE)bitStream; out[1] = (BYTE)(bitStream>>8); out+= (bitCount+7) /8; + if (charnum > maxSymbolValue + 1) return ERROR(GENERIC); + return (out-ostart); } -size_t FSE_writeNCount (void* buffer, size_t bufferSize, - const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) { if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */ @@ -314,13 +279,179 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize, if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); - return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */); + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); +} + + + +/*-************************************************************** +* Counting histogram +****************************************************************/ +/*! FSE_count_simple + This function counts byte values within `src`, and store the histogram into table `count`. + It doesn't use any additional memory. + But this function is unsafe : it doesn't check that all values within `src` can fit into `count`. + For this reason, prefer using a table `count` with 256 elements. + @return : count of most numerous element. +*/ +size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + const BYTE* const end = ip + srcSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned max=0; + + memset(count, 0, (maxSymbolValue+1)*sizeof(*count)); + if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; } + + while (ip max) max = count[s]; } + + return (size_t)max; +} + + +/* FSE_count_parallel_wksp() : + * Same as FSE_count_parallel(), but using an externally provided scratch buffer. 
+ * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`. + * @return : largest histogram frequency, or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */ +static size_t FSE_count_parallel_wksp( + unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, + unsigned checkMax, unsigned* const workSpace) +{ + const BYTE* ip = (const BYTE*)source; + const BYTE* const iend = ip+sourceSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned max=0; + U32* const Counting1 = workSpace; + U32* const Counting2 = Counting1 + 256; + U32* const Counting3 = Counting2 + 256; + U32* const Counting4 = Counting3 + 256; + + memset(workSpace, 0, 4*256*sizeof(unsigned)); + + /* safety checks */ + if (!sourceSize) { + memset(count, 0, maxSymbolValue + 1); + *maxSymbolValuePtr = 0; + return 0; + } + if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ + + /* by stripes of 16 bytes */ + { U32 cached = MEM_read32(ip); ip += 4; + while (ip < iend-15) { + U32 c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + } + ip-=4; + } + + /* finish last symbols */ + while (ipmaxSymbolValue; s--) { + Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; + if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); + } } + + { U32 s; + if (maxSymbolValue > 255) maxSymbolValue = 255; + for (s=0; s<=maxSymbolValue; s++) { + count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; + if (count[s] > max) max = count[s]; + } } + + while (!count[maxSymbolValue]) maxSymbolValue--; + *maxSymbolValuePtr = maxSymbolValue; + return (size_t)max; +} + +/* FSE_countFast_wksp() : + * Same as FSE_countFast(), but using an externally provided scratch buffer. + * `workSpace` size must be table of >= `1024` unsigned */ +size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, + unsigned* workSpace) +{ + if (sourceSize < 1500) /* heuristic threshold */ + return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize); + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace); +} + +/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ +size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize) +{ + unsigned tmpCounters[1024]; + return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters); +} + +/* FSE_count_wksp() : + * Same as FSE_count(), but using an externally provided scratch buffer. 
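`FSE_count_parallel_wksp()` above reads the input in 16-byte stripes and spreads increments across four separate counting tables; on out-of-order CPUs this removes the serial dependency that a single `count[byte]++` loop develops when the same byte value repeats. A stripped-down sketch of that idea (standalone code, not zstd's; the name `histogram4` is invented for the illustration):

```c
#include <stddef.h>
#include <string.h>

/* Four independent tables give four independent increment streams,
 * recombined once at the end, as in FSE_count_parallel_wksp(). */
static unsigned histogram4(unsigned count[256],
                           const unsigned char* src, size_t srcSize)
{
    unsigned c[4][256];
    unsigned max = 0;
    size_t i = 0;
    memset(c, 0, sizeof(c));
    for (; i + 4 <= srcSize; i += 4) {
        c[0][src[i+0]]++;
        c[1][src[i+1]]++;
        c[2][src[i+2]]++;
        c[3][src[i+3]]++;
    }
    for (; i < srcSize; i++) c[0][src[i]]++;   /* leftover tail bytes */
    {   int s;
        for (s = 0; s < 256; s++) {
            count[s] = c[0][s] + c[1][s] + c[2][s] + c[3][s];
            if (count[s] > max) max = count[s];
        }
    }
    return max;   /* largest frequency, mirroring the zstd convention */
}
```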
+ * `workSpace` size must be table of >= `1024` unsigned */ +size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace) +{ + if (*maxSymbolValuePtr < 255) + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace); + *maxSymbolValuePtr = 255; + return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace); +} + +size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize) +{ + unsigned tmpCounters[1024]; + return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters); } + /*-************************************************************** * FSE Compression Code ****************************************************************/ +/*! FSE_sizeof_CTable() : + FSE_CTable is a variable size structure which contains : + `U16 tableLog;` + `U16 maxSymbolValue;` + `U16 nextStateNumber[1 << tableLog];` // This size is variable + `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable +Allocation is manual (C standard does not support variable-size structures). +*/ +size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog) +{ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); +} FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) { @@ -335,7 +466,7 @@ void FSE_freeCTable (FSE_CTable* ct) { free(ct); } /* provides the minimum logSize to safely represent a distribution */ static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) { - U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1; + U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1; U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; U32 minBits = minBitsSrc < minBitsSymbols ? 
minBitsSrc : minBitsSymbols; assert(srcSize > 1); /* Not supported, RLE should be used instead */ @@ -398,9 +529,6 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, } ToDistribute = (1 << tableLog) - distributed; - if (ToDistribute == 0) - return 0; - if ((total / ToDistribute) > lowOne) { /* risk of rounding to zero */ lowOne = (U32)((total * 3) / (ToDistribute * 2)); @@ -501,11 +629,11 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, U32 s; U32 nTotal = 0; for (s=0; s<=maxSymbolValue; s++) - RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]); + printf("%3i: %4i \n", s, normalizedCounter[s]); for (s=0; s<=maxSymbolValue; s++) nTotal += abs(normalizedCounter[s]); if (nTotal != (1U< not compressible */ if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ @@ -707,7 +835,7 @@ typedef struct { size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) { fseWkspMax_t scratchBuffer; - DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ + FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); } diff --git a/vendor/github.com/DataDog/zstd/fse_decompress.c b/vendor/github.com/DataDog/zstd/fse_decompress.c index 72bbead5bee..4c66c3b7746 100644 --- a/vendor/github.com/DataDog/zstd/fse_decompress.c +++ b/vendor/github.com/DataDog/zstd/fse_decompress.c @@ -49,7 +49,7 @@ * Error Management ****************************************************************/ #define FSE_isError ERR_isError -#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ /* check and forward error code */ #define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; } diff --git a/vendor/github.com/DataDog/zstd/hist.c b/vendor/github.com/DataDog/zstd/hist.c deleted file mode 100644 index 45b7babc1e2..00000000000 --- a/vendor/github.com/DataDog/zstd/hist.c +++ /dev/null @@ -1,203 +0,0 @@ -/* ****************************************************************** - hist : Histogram functions - part of Finite State Entropy project - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -****************************************************************** */ - -/* --- dependencies --- */ -#include "mem.h" /* U32, BYTE, etc. */ -#include "debug.h" /* assert, DEBUGLOG */ -#include "error_private.h" /* ERROR */ -#include "hist.h" - - -/* --- Error management --- */ -unsigned HIST_isError(size_t code) { return ERR_isError(code); } - -/*-************************************************************** - * Histogram functions - ****************************************************************/ -unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, - const void* src, size_t srcSize) -{ - const BYTE* ip = (const BYTE*)src; - const BYTE* const end = ip + srcSize; - unsigned maxSymbolValue = *maxSymbolValuePtr; - unsigned largestCount=0; - - memset(count, 0, (maxSymbolValue+1) * sizeof(*count)); - if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; } - - while (ip largestCount) largestCount = count[s]; - } - - return largestCount; -} - -typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e; - -/* HIST_count_parallel_wksp() : - * store histogram into 4 intermediate tables, recombined at the end. - * this design makes better use of OoO cpus, - * and is noticeably faster when some values are heavily repeated. - * But it needs some additional workspace for intermediate tables. - * `workSpace` size must be a table of size >= HIST_WKSP_SIZE_U32. - * @return : largest histogram frequency, - * or an error code (notably when histogram would be larger than *maxSymbolValuePtr). 
*/ -static size_t HIST_count_parallel_wksp( - unsigned* count, unsigned* maxSymbolValuePtr, - const void* source, size_t sourceSize, - HIST_checkInput_e check, - U32* const workSpace) -{ - const BYTE* ip = (const BYTE*)source; - const BYTE* const iend = ip+sourceSize; - unsigned maxSymbolValue = *maxSymbolValuePtr; - unsigned max=0; - U32* const Counting1 = workSpace; - U32* const Counting2 = Counting1 + 256; - U32* const Counting3 = Counting2 + 256; - U32* const Counting4 = Counting3 + 256; - - memset(workSpace, 0, 4*256*sizeof(unsigned)); - - /* safety checks */ - if (!sourceSize) { - memset(count, 0, maxSymbolValue + 1); - *maxSymbolValuePtr = 0; - return 0; - } - if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ - - /* by stripes of 16 bytes */ - { U32 cached = MEM_read32(ip); ip += 4; - while (ip < iend-15) { - U32 c = cached; cached = MEM_read32(ip); ip += 4; - Counting1[(BYTE) c ]++; - Counting2[(BYTE)(c>>8) ]++; - Counting3[(BYTE)(c>>16)]++; - Counting4[ c>>24 ]++; - c = cached; cached = MEM_read32(ip); ip += 4; - Counting1[(BYTE) c ]++; - Counting2[(BYTE)(c>>8) ]++; - Counting3[(BYTE)(c>>16)]++; - Counting4[ c>>24 ]++; - c = cached; cached = MEM_read32(ip); ip += 4; - Counting1[(BYTE) c ]++; - Counting2[(BYTE)(c>>8) ]++; - Counting3[(BYTE)(c>>16)]++; - Counting4[ c>>24 ]++; - c = cached; cached = MEM_read32(ip); ip += 4; - Counting1[(BYTE) c ]++; - Counting2[(BYTE)(c>>8) ]++; - Counting3[(BYTE)(c>>16)]++; - Counting4[ c>>24 ]++; - } - ip-=4; - } - - /* finish last symbols */ - while (ipmaxSymbolValue; s--) { - Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; - if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); - } } - - { U32 s; - if (maxSymbolValue > 255) maxSymbolValue = 255; - for (s=0; s<=maxSymbolValue; s++) { - count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; - if (count[s] > max) max = count[s]; - } } - - while (!count[maxSymbolValue]) maxSymbolValue--; - *maxSymbolValuePtr = maxSymbolValue; - return (size_t)max; -} - -/* HIST_countFast_wksp() : - * Same as HIST_countFast(), but using an externally provided scratch buffer. - * `workSpace` is a writable buffer which must be 4-bytes aligned, - * `workSpaceSize` must be >= HIST_WKSP_SIZE - */ -size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, - const void* source, size_t sourceSize, - void* workSpace, size_t workSpaceSize) -{ - if (sourceSize < 1500) /* heuristic threshold */ - return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); - if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ - if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); - return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace); -} - -/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ -size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, - const void* source, size_t sourceSize) -{ - unsigned tmpCounters[HIST_WKSP_SIZE_U32]; - return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters)); -} - -/* HIST_count_wksp() : - * Same as HIST_count(), but using an externally provided scratch buffer. 
- * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */ -size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, - const void* source, size_t sourceSize, - void* workSpace, size_t workSpaceSize) -{ - if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ - if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); - if (*maxSymbolValuePtr < 255) - return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace); - *maxSymbolValuePtr = 255; - return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize); -} - -size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, - const void* src, size_t srcSize) -{ - unsigned tmpCounters[HIST_WKSP_SIZE_U32]; - return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters)); -} diff --git a/vendor/github.com/DataDog/zstd/hist.h b/vendor/github.com/DataDog/zstd/hist.h deleted file mode 100644 index 8b389358dc1..00000000000 --- a/vendor/github.com/DataDog/zstd/hist.h +++ /dev/null @@ -1,95 +0,0 @@ -/* ****************************************************************** - hist : Histogram functions - part of Finite State Entropy project - Copyright (C) 2013-present, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You can contact the author at : - - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy - - Public forum : https://groups.google.com/forum/#!forum/lz4c -****************************************************************** */ - -/* --- dependencies --- */ -#include /* size_t */ - - -/* --- simple histogram functions --- */ - -/*! HIST_count(): - * Provides the precise count of each byte within a table 'count'. - * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). - * Updates *maxSymbolValuePtr with actual largest symbol value detected. - * @return : count of the most frequent symbol (which isn't identified). - * or an error code, which can be tested using HIST_isError(). - * note : if return == srcSize, there is only one symbol. 
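The return-value convention documented here (and kept by the restored `FSE_count()`) doubles as a cheap compressibility probe: `FSE_compress()` in this diff returns early when the most frequent symbol accounts for the whole input (pure RLE) or for too small a share of it. A hedged sketch of that gate (the wrapper name `looks_compressible` is invented; the thresholds mirror the early-outs visible in `FSE_compress_wksp()` above):

```c
#include "fse.h"

/* Sketch: use the histogram's maximum frequency as a quick gate before
 * attempting full FSE compression. */
static int looks_compressible(const void* src, size_t srcSize)
{
    unsigned count[256];
    unsigned maxSymbolValue = 255;
    size_t const maxCount = FSE_count(count, &maxSymbolValue, src, srcSize);
    if (FSE_isError(maxCount)) return 0;
    if (maxCount == srcSize) return 0;          /* single symbol: RLE territory, not FSE */
    if (maxCount < (srcSize >> 7)) return 0;    /* distribution too flat: not compressible enough */
    return 1;
}
```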
- */ -size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, - const void* src, size_t srcSize); - -unsigned HIST_isError(size_t code); /**< tells if a return value is an error code */ - - -/* --- advanced histogram functions --- */ - -#define HIST_WKSP_SIZE_U32 1024 -#define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned)) -/** HIST_count_wksp() : - * Same as HIST_count(), but using an externally provided scratch buffer. - * Benefit is this function will use very little stack space. - * `workSpace` is a writable buffer which must be 4-bytes aligned, - * `workSpaceSize` must be >= HIST_WKSP_SIZE - */ -size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, - const void* src, size_t srcSize, - void* workSpace, size_t workSpaceSize); - -/** HIST_countFast() : - * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr. - * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` - */ -size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, - const void* src, size_t srcSize); - -/** HIST_countFast_wksp() : - * Same as HIST_countFast(), but using an externally provided scratch buffer. - * `workSpace` is a writable buffer which must be 4-bytes aligned, - * `workSpaceSize` must be >= HIST_WKSP_SIZE - */ -size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, - const void* src, size_t srcSize, - void* workSpace, size_t workSpaceSize); - -/*! HIST_count_simple() : - * Same as HIST_countFast(), this function is unsafe, - * and will segfault if any value within `src` is `> *maxSymbolValuePtr`. - * It is also a bit slower for large inputs. - * However, it does not need any additional memory (not even on stack). - * @return : count of the most frequent symbol. - * Note this function doesn't produce any error (i.e. it must succeed). - */ -unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, - const void* src, size_t srcSize); diff --git a/vendor/github.com/DataDog/zstd/huf.h b/vendor/github.com/DataDog/zstd/huf.h index 6b572c448d9..b4645b4e519 100644 --- a/vendor/github.com/DataDog/zstd/huf.h +++ b/vendor/github.com/DataDog/zstd/huf.h @@ -1,7 +1,7 @@ /* ****************************************************************** - huff0 huffman codec, - part of Finite State Entropy library - Copyright (C) 2013-present, Yann Collet. + Huffman coder, part of New Generation Entropy library + header file + Copyright (C) 2013-2016, Yann Collet. 
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) @@ -163,29 +163,25 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, /* static allocation of HUF's DTable */ typedef U32 HUF_DTable; #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) -#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \ - HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } #define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ + HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } +#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } /* **************************************** * Advanced decompression functions ******************************************/ -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -#endif +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ -#endif +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ +size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ +size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ /* **************************************** @@ -212,7 +208,7 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si typedef enum { HUF_repeat_none, /**< Cannot use the previous table */ HUF_repeat_check, /**< Can use the previous table but it must be checked. 
Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ - HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ + HUF_repeat_valid /**< Can use the previous table and it is asumed to be valid */ } HUF_repeat; /** HUF_compress4X_repeat() : * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. @@ -231,9 +227,7 @@ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, */ #define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) -size_t HUF_buildCTable_wksp (HUF_CElt* tree, - const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, - void* workSpace, size_t wkspSize); +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize); /*! HUF_readStats() : * Read compact Huffman tree, saved by HUF_writeCTable(). @@ -248,15 +242,10 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, * Loading a CTable saved with HUF_writeCTable() */ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); -/** HUF_getNbBits() : - * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX - * Note 1 : is not inlined, as HUF_CElt definition is private - * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */ -U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue); /* * HUF_decompress() does the following: - * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics + * 1. select the decompression algorithm (X2, X4) based on pre-computed heuristics * 2. build Huffman table from save, using HUF_readDTableX?() * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable() */ @@ -264,13 +253,13 @@ U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue); /** HUF_selectDecoder() : * Tells which decoder is likely to decode faster, * based on a set of pre-computed metrics. - * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . + * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . * Assumption : 0 < dstSize <= 128 KB */ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); /** * The minimum workspace size for the `workSpace` used in - * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp(). + * HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp(). * * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15. 
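Before the table-loading declarations in the next hunk, it is worth seeing how huf.h's one-shot entry points fit together. A minimal round-trip sketch using the public `HUF_compress()`/`HUF_decompress()` (assuming an input no larger than one Huffman block, 128 KB, and that the exact original size is known at decompression time; a real caller would heap-allocate the regeneration buffer and handle the RLE/incompressible return codes instead of bailing out):

```c
#include <string.h>
#include "huf.h"

/* Sketch: one-shot Huffman round trip. HUF_compress() returns 0 for
 * incompressible input and 1 for single-symbol (RLE) input, so those
 * cases are treated as "not handled here". */
static int huf_roundtrip(const void* src, size_t srcSize,
                         void* scratch, size_t scratchCapacity)
{
    unsigned char regen[128 * 1024];          /* single-block limit */
    if (srcSize > sizeof(regen)) return -1;

    {   size_t const cSize = HUF_compress(scratch, scratchCapacity, src, srcSize);
        if (HUF_isError(cSize) || cSize <= 1) return -1;   /* error, RLE, or incompressible */

        {   size_t const rSize = HUF_decompress(regen, srcSize, scratch, cSize);
            if (HUF_isError(rSize)) return -1;
        }
    }
    return memcmp(src, regen, srcSize) == 0 ? 0 : -1;
}
```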
@@ -281,22 +270,14 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); #define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10) #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); -#endif +size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize); +size_t HUF_readDTableX4_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif +size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /* ====================== */ @@ -317,37 +298,25 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ -#endif +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ +size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ -#endif +size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t 
cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ +size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ +size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif -#ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif +size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /* BMI2 variants. * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. */ size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); -#endif +size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); diff --git a/vendor/github.com/DataDog/zstd/huf_compress.c b/vendor/github.com/DataDog/zstd/huf_compress.c index f074f1e0a95..83230b415f9 100644 --- a/vendor/github.com/DataDog/zstd/huf_compress.c +++ b/vendor/github.com/DataDog/zstd/huf_compress.c @@ -45,9 +45,8 @@ ****************************************************************/ #include /* memcpy, memset */ #include /* printf (debug) */ -#include "compiler.h" #include "bitstream.h" -#include "hist.h" +#include "compiler.h" #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ #include "fse.h" /* header compression */ #define HUF_STATIC_LINKING_ONLY @@ -59,7 +58,7 @@ * Error Management ****************************************************************/ #define HUF_isError ERR_isError -#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e #define CHECK_F(f) { CHECK_V_F(_var_err__, f); } @@ -82,28 +81,28 @@ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. 
*/ #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 -static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) +size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) { BYTE* const ostart = (BYTE*) dst; BYTE* op = ostart; BYTE* const oend = ostart + dstSize; - unsigned maxSymbolValue = HUF_TABLELOG_MAX; + U32 maxSymbolValue = HUF_TABLELOG_MAX; U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; BYTE scratchBuffer[1< not compressible */ + if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ } tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); @@ -134,7 +133,7 @@ struct HUF_CElt_s { `CTable` : Huffman tree to save, using huf representation. @return : size of saved CTable */ size_t HUF_writeCTable (void* dst, size_t maxDstSize, - const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog) + const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog) { BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; @@ -169,7 +168,7 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize, } -size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize) +size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize) { BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ @@ -217,13 +216,6 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void return readSize; } -U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue) -{ - const HUF_CElt* table = (const HUF_CElt*)symbolTable; - assert(symbolValue <= HUF_SYMBOLVALUE_MAX); - return table[symbolValue].nbBits; -} - typedef struct nodeElt_s { U32 count; @@ -315,7 +307,7 @@ typedef struct { U32 current; } rankPos; -static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue) +static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue) { rankPos rank[32]; U32 n; @@ -347,7 +339,7 @@ static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValu */ #define STARTNODE (HUF_SYMBOLVALUE_MAX+1) typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; -size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) { nodeElt* const huffNode0 = (nodeElt*)workSpace; nodeElt* const huffNode = huffNode0+1; @@ -421,7 +413,7 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbo * @return : maxNbBits * Note : count is used before tree is written, so they can safely overlap */ -size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits) +size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits) { huffNodeTable nodeTable; return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable)); @@ -610,14 +602,13 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si return HUF_compress4X_usingCTable_internal(dst, dstSize, src, 
srcSize, CTable, /* bmi2 */ 0); } -typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; static size_t HUF_compressCTable_internal( BYTE* const ostart, BYTE* op, BYTE* const oend, const void* src, size_t srcSize, - HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2) + unsigned singleStream, const HUF_CElt* CTable, const int bmi2) { - size_t const cSize = (nbStreams==HUF_singleStream) ? + size_t const cSize = singleStream ? HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) : HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2); if (HUF_isError(cSize)) { return cSize; } @@ -629,21 +620,21 @@ static size_t HUF_compressCTable_internal( } typedef struct { - unsigned count[HUF_SYMBOLVALUE_MAX + 1]; + U32 count[HUF_SYMBOLVALUE_MAX + 1]; HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1]; huffNodeTable nodeTable; } HUF_compress_tables_t; /* HUF_compress_internal() : * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ -static size_t -HUF_compress_internal (void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned huffLog, - HUF_nbStreams_e nbStreams, - void* workSpace, size_t wkspSize, - HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, - const int bmi2) +static size_t HUF_compress_internal ( + void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + unsigned singleStream, + void* workSpace, size_t wkspSize, + HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, + const int bmi2) { HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace; BYTE* const ostart = (BYTE*)dst; @@ -652,7 +643,7 @@ HUF_compress_internal (void* dst, size_t dstSize, /* checks & inits */ if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ - if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall); + if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall); if (!srcSize) return 0; /* Uncompressed */ if (!dstSize) return 0; /* cannot fit anything within dst budget */ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ @@ -665,13 +656,13 @@ HUF_compress_internal (void* dst, size_t dstSize, if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + singleStream, oldHufTable, bmi2); } /* Scan input and build symbol stats */ - { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) ); + { CHECK_V_F(largest, FSE_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) ); if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ - if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ + if (largest <= (srcSize >> 7)+1) return 0; /* heuristic : probably not compressible enough */ } /* Check validity of previous table */ @@ -684,15 +675,14 @@ HUF_compress_internal (void* dst, size_t dstSize, if (preferRepeat && repeat && *repeat != HUF_repeat_none) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + singleStream, oldHufTable, bmi2); } /* Build Huffman Tree */ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); - { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, 
- maxSymbolValue, huffLog, - table->nodeTable, sizeof(table->nodeTable)); - CHECK_F(maxBits); + { CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count, + maxSymbolValue, huffLog, + table->nodeTable, sizeof(table->nodeTable)) ); huffLog = (U32)maxBits; /* Zero unused symbols in CTable, so we can check it for validity */ memset(table->CTable + (maxSymbolValue + 1), 0, @@ -708,7 +698,7 @@ HUF_compress_internal (void* dst, size_t dstSize, if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, oldHufTable, bmi2); + singleStream, oldHufTable, bmi2); } } /* Use the new huffman table */ @@ -720,7 +710,7 @@ HUF_compress_internal (void* dst, size_t dstSize, } return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, - nbStreams, table->CTable, bmi2); + singleStream, table->CTable, bmi2); } @@ -730,7 +720,7 @@ size_t HUF_compress1X_wksp (void* dst, size_t dstSize, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_singleStream, + maxSymbolValue, huffLog, 1 /*single stream*/, workSpace, wkspSize, NULL, NULL, 0, 0 /*bmi2*/); } @@ -742,7 +732,7 @@ size_t HUF_compress1X_repeat (void* dst, size_t dstSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) { return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_singleStream, + maxSymbolValue, huffLog, 1 /*single stream*/, workSpace, wkspSize, hufTable, repeat, preferRepeat, bmi2); } @@ -764,7 +754,7 @@ size_t HUF_compress4X_wksp (void* dst, size_t dstSize, void* workSpace, size_t wkspSize) { return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_fourStreams, + maxSymbolValue, huffLog, 0 /*4 streams*/, workSpace, wkspSize, NULL, NULL, 0, 0 /*bmi2*/); } @@ -779,7 +769,7 @@ size_t HUF_compress4X_repeat (void* dst, size_t dstSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) { return HUF_compress_internal(dst, dstSize, src, srcSize, - maxSymbolValue, huffLog, HUF_fourStreams, + maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, preferRepeat, bmi2); } diff --git a/vendor/github.com/DataDog/zstd/huf_decompress.c b/vendor/github.com/DataDog/zstd/huf_decompress.c index 3f8bd297320..73f5c46c061 100644 --- a/vendor/github.com/DataDog/zstd/huf_decompress.c +++ b/vendor/github.com/DataDog/zstd/huf_decompress.c @@ -1,7 +1,6 @@ /* ****************************************************************** - huff0 huffman decoder, - part of Finite State Entropy library - Copyright (C) 2013-present, Yann Collet. + Huffman decoder, part of New Generation Entropy library + Copyright (C) 2013-2016, Yann Collet. 
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) @@ -30,37 +29,26 @@ You can contact the author at : - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c ****************************************************************** */ /* ************************************************************** * Dependencies ****************************************************************/ #include /* memcpy, memset */ -#include "compiler.h" #include "bitstream.h" /* BIT_* */ -#include "fse.h" /* to compress headers */ +#include "compiler.h" +#include "fse.h" /* header compression */ #define HUF_STATIC_LINKING_ONLY #include "huf.h" #include "error_private.h" -/* ************************************************************** -* Macros -****************************************************************/ - -/* These two optional macros force the use one way or another of the two - * Huffman decompression implementations. You can't force in both directions - * at the same time. - */ -#if defined(HUF_FORCE_DECOMPRESS_X1) && \ - defined(HUF_FORCE_DECOMPRESS_X2) -#error "Cannot force the use of the X1 and X2 decoders at the same time!" -#endif - /* ************************************************************** * Error Management ****************************************************************/ #define HUF_isError ERR_isError +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ #define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; } @@ -71,51 +59,6 @@ #define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) -/* ************************************************************** -* BMI2 Variant Wrappers -****************************************************************/ -#if DYNAMIC_BMI2 - -#define HUF_DGEN(fn) \ - \ - static size_t fn##_default( \ - void* dst, size_t dstSize, \ - const void* cSrc, size_t cSrcSize, \ - const HUF_DTable* DTable) \ - { \ - return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ - } \ - \ - static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \ - void* dst, size_t dstSize, \ - const void* cSrc, size_t cSrcSize, \ - const HUF_DTable* DTable) \ - { \ - return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ - } \ - \ - static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ - { \ - if (bmi2) { \ - return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ - } \ - return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ - } - -#else - -#define HUF_DGEN(fn) \ - static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ - size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ - { \ - (void)bmi2; \ - return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ - } - -#endif - - /*-***************************/ /* generic DTableDesc */ /*-***************************/ @@ -129,20 +72,18 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) } -#ifndef HUF_FORCE_DECOMPRESS_X2 - /*-***************************/ /* single-symbol decoding */ /*-***************************/ -typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */ +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ -size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* 
src, size_t srcSize, void* workSpace, size_t wkspSize) { U32 tableLog = 0; U32 nbSymbols = 0; size_t iSize; void* const dtPtr = DTable + 1; - HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr; + HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; U32* rankVal; BYTE* huffWeight; @@ -155,7 +96,7 @@ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge); - DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); + HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); @@ -183,7 +124,7 @@ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize U32 const w = huffWeight[n]; U32 const length = (1 << w) >> 1; U32 u; - HUF_DEltX1 D; + HUF_DEltX2 D; D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); for (u = rankVal[w]; u < rankVal[w] + length; u++) dt[u] = D; @@ -193,15 +134,17 @@ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize return iSize; } -size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize) +size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX1_wksp(DTable, src, srcSize, + return HUF_readDTableX2_wksp(DTable, src, srcSize, workSpace, sizeof(workSpace)); } +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ + FORCE_INLINE_TEMPLATE BYTE -HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog) +HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ BYTE const c = dt[val].byte; @@ -209,44 +152,44 @@ HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog return c; } -#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \ - *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog) +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ + *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) -#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \ +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ - HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) -#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \ +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ if (MEM_64bits()) \ - HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) HINT_INLINE size_t -HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog) +HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) { BYTE* const pStart = p; /* up to 4 symbols at a time */ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { - HUF_DECODE_SYMBOLX1_2(p, bitDPtr); - HUF_DECODE_SYMBOLX1_1(p, bitDPtr); - HUF_DECODE_SYMBOLX1_2(p, bitDPtr); - HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); } /* [0-3] symbols remaining */ if (MEM_32bits()) while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd)) - 
HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no more data to retrieve from bitstream, no need to reload */ while (p < pEnd) - HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); return pEnd-pStart; } FORCE_INLINE_TEMPLATE size_t -HUF_decompress1X1_usingDTable_internal_body( +HUF_decompress1X2_usingDTable_internal_body( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) @@ -254,14 +197,14 @@ HUF_decompress1X1_usingDTable_internal_body( BYTE* op = (BYTE*)dst; BYTE* const oend = op + dstSize; const void* dtPtr = DTable + 1; - const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; BIT_DStream_t bitD; DTableDesc const dtd = HUF_getDTableDesc(DTable); U32 const dtLog = dtd.tableLog; CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); - HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog); + HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog); if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); @@ -269,7 +212,7 @@ HUF_decompress1X1_usingDTable_internal_body( } FORCE_INLINE_TEMPLATE size_t -HUF_decompress4X1_usingDTable_internal_body( +HUF_decompress4X2_usingDTable_internal_body( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) @@ -281,7 +224,7 @@ HUF_decompress4X1_usingDTable_internal_body( BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable + 1; - const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; /* Init */ BIT_DStream_t bitD1; @@ -317,22 +260,22 @@ HUF_decompress4X1_usingDTable_internal_body( /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */ endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) { - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_1(op1, &bitD1); - HUF_DECODE_SYMBOLX1_1(op2, &bitD2); - HUF_DECODE_SYMBOLX1_1(op3, &bitD3); - HUF_DECODE_SYMBOLX1_1(op4, &bitD4); - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_0(op1, &bitD1); - HUF_DECODE_SYMBOLX1_0(op2, &bitD2); - HUF_DECODE_SYMBOLX1_0(op3, &bitD3); - HUF_DECODE_SYMBOLX1_0(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); BIT_reloadDStream(&bitD1); BIT_reloadDStream(&bitD2); BIT_reloadDStream(&bitD3); @@ -348,10 +291,191 @@ HUF_decompress4X1_usingDTable_internal_body( /* note : op4 supposed already verified within main loop */ /* finish bitStreams one by one */ - HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog); - HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog); - 
HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog); - HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog); + HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); + + /* check */ + { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endCheck) return ERROR(corruption_detected); } + + /* decoded size */ + return dstSize; + } +} + + +FORCE_INLINE_TEMPLATE U32 +HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 2); + BIT_skipBits(DStream, dt[val].nbBits); + return dt[val].length; +} + +FORCE_INLINE_TEMPLATE U32 +HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 1); + if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); + else { + if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { + BIT_skipBits(DStream, dt[val].nbBits); + if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) + /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */ + DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); + } } + return 1; +} + +#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +HINT_INLINE size_t +HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, + const HUF_DEltX4* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_1(p, bitDPtr); + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + } + + /* closer to end : up to 2 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + + if (p < pEnd) + p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); + + return p-pStart; +} + +FORCE_INLINE_TEMPLATE size_t +HUF_decompress1X4_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + BIT_DStream_t bitD; + + /* Init */ + CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); + + /* decode */ + { BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ + const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog); + } + + /* check */ + if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; +} + + 
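The functions added above revolve around the double-symbol cell `HUF_DEltX4` (`U16 sequence; BYTE nbBits; BYTE length;`): `HUF_decodeSymbolX4()` unconditionally copies two bytes from the cell but advances the output by `length`, while `HUF_decodeLastSymbolX4()` copies a single byte so the final write cannot overrun `dst`. A toy stand-alone illustration of that branch-free trick follows; the names and table values are invented for the example and a little-endian host is assumed, so this is a sketch of the idea, not zstd API:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy double-symbol cell laid out like HUF_DEltX4 above (invented names). */
typedef struct { uint16_t sequence; uint8_t nbBits; uint8_t length; } DElt;

/* Branch-free emit, same trick as HUF_decodeSymbolX4(): always copy the two
 * packed bytes, but tell the caller whether 1 or 2 of them were real. */
static size_t emit(uint8_t* op, const DElt* cell)
{
    memcpy(op, &cell->sequence, 2);
    return cell->length;
}

int main(void)
{
    uint8_t out[8] = {0};
    uint8_t* p = out;
    /* "ab" packed little-endian: one table lookup decodes two symbols. */
    DElt two = { (uint16_t)('a' | ('b' << 8)), 5, 2 };
    /* 'c' alone: the high byte of sequence is scratch; length says skip it. */
    DElt one = { (uint16_t)'c', 3, 1 };

    p += emit(p, &two);
    p += emit(p, &one);   /* writes 2 bytes but only claims 1 */
    printf("%d bytes decoded: %s\n", (int)(p - out), (const char*)out);
    return 0;             /* prints: 3 bytes decoded: abc */
}
```

Because each step writes two bytes while possibly advancing by one, the output effectively needs a byte of scratch headroom, which is exactly why the real decoder switches to the one-byte `HUF_decodeLastSymbolX4()` at the tail of the stream.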
+FORCE_INLINE_TEMPLATE size_t +HUF_decompress4X4_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable+1; + const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + size_t const segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); + CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); + CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); + CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) { + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_1(op1, &bitD1); + HUF_DECODE_SYMBOLX4_1(op2, &bitD2); + HUF_DECODE_SYMBOLX4_1(op3, &bitD3); + HUF_DECODE_SYMBOLX4_1(op4, &bitD4); + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_0(op1, &bitD1); + HUF_DECODE_SYMBOLX4_0(op2, &bitD2); + HUF_DECODE_SYMBOLX4_0(op3, &bitD3); + HUF_DECODE_SYMBOLX4_0(op4, &bitD4); + + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); /* check */ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); @@ -367,119 +491,153 @@ typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable 
*DTable); +#if DYNAMIC_BMI2 + +#define X(fn) \ + \ + static size_t fn##_default( \ + void* dst, size_t dstSize, \ + const void* cSrc, size_t cSrcSize, \ + const HUF_DTable* DTable) \ + { \ + return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ + } \ + \ + static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \ + void* dst, size_t dstSize, \ + const void* cSrc, size_t cSrcSize, \ + const HUF_DTable* DTable) \ + { \ + return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ + } \ + \ + static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + { \ + if (bmi2) { \ + return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ + } \ + return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ + } + +#else + +#define X(fn) \ + static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + { \ + (void)bmi2; \ + return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ + } + +#endif -HUF_DGEN(HUF_decompress1X1_usingDTable_internal) -HUF_DGEN(HUF_decompress4X1_usingDTable_internal) +X(HUF_decompress1X2_usingDTable_internal) +X(HUF_decompress4X2_usingDTable_internal) +X(HUF_decompress1X4_usingDTable_internal) +X(HUF_decompress4X4_usingDTable_internal) +#undef X -size_t HUF_decompress1X1_usingDTable( +size_t HUF_decompress1X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); + size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); } -size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, +size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, + return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); + return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); } -size_t HUF_decompress4X1_usingDTable( +size_t HUF_decompress4X2_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { 
DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 0) return ERROR(GENERIC); - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } -static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize, + size_t const hSize = HUF_readDTableX2_wksp (dctx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); } -size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { - return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); + return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); } -size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } -size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { - HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); - return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); + return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } -#endif /* HUF_FORCE_DECOMPRESS_X2 */ - - -#ifndef HUF_FORCE_DECOMPRESS_X1 /* *************************/ /* double-symbols decoding */ /* *************************/ - -typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */ typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; -typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; -typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]; - -/* HUF_fillDTableX2Level2() : +/* HUF_fillDTableX4Level2() : * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ -static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed, +static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, const U32* rankValOrigin, const int minWeight, const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq) { - HUF_DEltX2 DElt; + HUF_DEltX4 DElt; U32 rankVal[HUF_TABLELOG_MAX + 1]; /* get pre-calculated rankVal */ @@ -514,8 +672,10 @@ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 co } } } +typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; 
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]; -static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, +static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, const U32 sortedListSize, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) @@ -540,12 +700,12 @@ static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, int minWeight = nbBits + scaleLog; if (minWeight < 1) minWeight = 1; sortedRank = rankStart[minWeight]; - HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits, + HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList+sortedRank, sortedListSize-sortedRank, nbBitsBaseline, symbol); } else { - HUF_DEltX2 DElt; + HUF_DEltX4 DElt; MEM_writeLE16(&(DElt.sequence), symbol); DElt.nbBits = (BYTE)(nbBits); DElt.length = 1; @@ -557,16 +717,16 @@ static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, } } -size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, - const void* src, size_t srcSize, - void* workSpace, size_t wkspSize) +size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src, + size_t srcSize, void* workSpace, + size_t wkspSize) { U32 tableLog, maxW, sizeOfSort, nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); U32 const maxTableLog = dtd.maxTableLog; size_t iSize; void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ - HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; + HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr; U32 *rankStart; rankValCol_t* rankVal; @@ -592,7 +752,7 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, rankStart = rankStart0 + 1; memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1)); - DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ + HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ @@ -646,7 +806,7 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, rankValPtr[w] = rankVal0[w] >> consumed; } } } } - HUF_fillDTableX2(dt, maxTableLog, + HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog+1); @@ -657,308 +817,112 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, return iSize; } -size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) +size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_readDTableX2_wksp(DTable, src, srcSize, + return HUF_readDTableX4_wksp(DTable, src, srcSize, workSpace, sizeof(workSpace)); } - -FORCE_INLINE_TEMPLATE U32 -HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) -{ - size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - memcpy(op, dt+val, 2); - BIT_skipBits(DStream, dt[val].nbBits); - return dt[val].length; -} - -FORCE_INLINE_TEMPLATE U32 -HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) -{ - size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - memcpy(op, dt+val, 1); - if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); - else { - if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { - BIT_skipBits(DStream, dt[val].nbBits); - if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) - /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */ - DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); - } } - return 1; -} - -#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) - -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ - if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) - -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ - if (MEM_64bits()) \ - ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) - -HINT_INLINE size_t -HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, - const HUF_DEltX2* const dt, const U32 dtLog) -{ - BYTE* const pStart = p; - - /* up to 8 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_1(p, bitDPtr); - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - } - - /* closer to end : up to 2 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - - while (p <= pEnd-2) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ - - if (p < pEnd) - p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog); - - return p-pStart; -} - -FORCE_INLINE_TEMPLATE size_t -HUF_decompress1X2_usingDTable_internal_body( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - BIT_DStream_t bitD; - - /* Init */ - CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); - - /* decode */ - { BYTE* const ostart = (BYTE*) dst; - BYTE* const oend = ostart + dstSize; - const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ - const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; - DTableDesc const dtd = HUF_getDTableDesc(DTable); - HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog); - } - - /* check */ - 
if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); - - /* decoded size */ - return dstSize; -} - - -FORCE_INLINE_TEMPLATE size_t -HUF_decompress4X2_usingDTable_internal_body( - void* dst, size_t dstSize, - const void* cSrc, size_t cSrcSize, - const HUF_DTable* DTable) -{ - if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ - - { const BYTE* const istart = (const BYTE*) cSrc; - BYTE* const ostart = (BYTE*) dst; - BYTE* const oend = ostart + dstSize; - const void* const dtPtr = DTable+1; - const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; - - /* Init */ - BIT_DStream_t bitD1; - BIT_DStream_t bitD2; - BIT_DStream_t bitD3; - BIT_DStream_t bitD4; - size_t const length1 = MEM_readLE16(istart); - size_t const length2 = MEM_readLE16(istart+2); - size_t const length3 = MEM_readLE16(istart+4); - size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); - const BYTE* const istart1 = istart + 6; /* jumpTable */ - const BYTE* const istart2 = istart1 + length1; - const BYTE* const istart3 = istart2 + length2; - const BYTE* const istart4 = istart3 + length3; - size_t const segmentSize = (dstSize+3) / 4; - BYTE* const opStart2 = ostart + segmentSize; - BYTE* const opStart3 = opStart2 + segmentSize; - BYTE* const opStart4 = opStart3 + segmentSize; - BYTE* op1 = ostart; - BYTE* op2 = opStart2; - BYTE* op3 = opStart3; - BYTE* op4 = opStart4; - U32 endSignal; - DTableDesc const dtd = HUF_getDTableDesc(DTable); - U32 const dtLog = dtd.tableLog; - - if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ - CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); - CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); - CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); - CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); - - /* 16-32 symbols per loop (4-8 symbols per stream) */ - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) { - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - - endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); - } - - /* check corruption */ - if (op1 > opStart2) return ERROR(corruption_detected); - if (op2 > opStart3) return ERROR(corruption_detected); - if (op3 > opStart4) return ERROR(corruption_detected); - /* note : op4 already verified within main loop */ - - /* finish bitStreams one by one */ - HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); - HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); - HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); - HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); - - /* check */ - { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); - if (!endCheck) return 
ERROR(corruption_detected); } - - /* decoded size */ - return dstSize; - } -} - -HUF_DGEN(HUF_decompress1X2_usingDTable_internal) -HUF_DGEN(HUF_decompress4X2_usingDTable_internal) - -size_t HUF_decompress1X2_usingDTable( +size_t HUF_decompress1X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, +size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { const BYTE* ip = (const BYTE*) cSrc; - size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, + size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); + return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); } -size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, +size_t HUF_decompress1X4_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, + return HUF_decompress1X4_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); + HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX); + return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } -size_t HUF_decompress4X2_usingDTable( +size_t HUF_decompress4X4_usingDTable( void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc dtd = HUF_getDTableDesc(DTable); if (dtd.tableType != 1) return ERROR(GENERIC); - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); + return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } -static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, +static size_t HUF_decompress4X4_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) { const BYTE* ip = (const BYTE*) cSrc; - size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, + size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize); if (HUF_isError(hSize)) return hSize; if (hSize >= cSrcSize) return ERROR(srcSize_wrong); ip += hSize; cSrcSize -= hSize; - return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); + return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); } -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, +size_t 
HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize) { - return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); + return HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); } -size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, +size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; - return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + return HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, sizeof(workSpace)); } -size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { - HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); - return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); + HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX); + return HUF_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); } -#endif /* HUF_FORCE_DECOMPRESS_X1 */ - -/* ***********************************/ -/* Universal decompression selectors */ -/* ***********************************/ +/* ********************************/ +/* Generic decompression selector */ +/* ********************************/ size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable) { DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif + return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : + HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, @@ -966,22 +930,11 @@ size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const HUF_DTable* DTable) { DTableDesc const dtd = HUF_getDTableDesc(DTable); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dtd; - assert(dtd.tableType == 0); - return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dtd; - assert(dtd.tableType == 1); - return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#else - return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : - HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); -#endif + return dtd.tableType ? 
HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : + HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); } -#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = { @@ -1003,26 +956,16 @@ static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, qu {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ }; -#endif /** HUF_selectDecoder() : * Tells which decoder is likely to decode faster, * based on a set of pre-computed metrics. - * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . + * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . * Assumption : 0 < dstSize <= 128 KB */ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) { assert(dstSize > 0); - assert(dstSize <= 128*1024); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)dstSize; - (void)cSrcSize; - return 0; -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)dstSize; - (void)cSrcSize; - return 1; -#else + assert(dstSize <= 128 KB); /* decoder timing evaluation */ { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */ U32 const D256 = (U32)(dstSize >> 8); @@ -1030,18 +973,14 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */ return DTime1 < DTime0; - } -#endif -} +} } typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) { -#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) - static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 }; -#endif + static const decompressionAlgo decompress[2] = { HUF_decompress4X2, HUF_decompress4X4 }; /* validation checks */ if (dstSize == 0) return ERROR(dstSize_tooSmall); @@ -1050,17 +989,7 @@ size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcS if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); -#else return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); -#endif } } @@ -1073,18 +1002,8 @@ size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); -#if defined(HUF_FORCE_DECOMPRESS_X1) - (void)algoNb; - assert(algoNb == 0); - return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#elif defined(HUF_FORCE_DECOMPRESS_X2) - (void)algoNb; - assert(algoNb == 1); - return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); -#else - return algoNb ? 
@@ -1073,18 +1002,8 @@ size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-        (void)algoNb;
-        assert(algoNb == 0);
-        return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-        (void)algoNb;
-        assert(algoNb == 1);
-        return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize);
-#else
-        return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
-                        HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
-#endif
+        return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
     }
 }
 
@@ -1106,19 +1025,8 @@ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
     if (cSrcSize == 0) return ERROR(corruption_detected);
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-        (void)algoNb;
-        assert(algoNb == 0);
-        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-        (void)algoNb;
-        assert(algoNb == 1);
-        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#else
-        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
-                                                    cSrcSize, workSpace, wkspSize):
-                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
-#endif
+        return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
+                        HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
     }
 }
 
@@ -1133,22 +1041,10 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-        (void)algoNb;
-        assert(algoNb == 0);
-        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
-                                cSrcSize, workSpace, wkspSize);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-        (void)algoNb;
-        assert(algoNb == 1);
-        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
-                                cSrcSize, workSpace, wkspSize);
-#else
-        return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+        return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                 cSrcSize, workSpace, wkspSize):
-                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                        HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                 cSrcSize, workSpace, wkspSize);
-#endif
     }
 }
 
@@ -1164,49 +1060,27 @@ size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
 size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-    (void)dtd;
-    assert(dtd.tableType == 0);
-    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-    (void)dtd;
-    assert(dtd.tableType == 1);
-    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#else
-    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
-                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#endif
+    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
 }
 
-#ifndef HUF_FORCE_DECOMPRESS_X2
-size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
+    size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
 }
-#endif
 
 size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-    (void)dtd;
-    assert(dtd.tableType == 0);
-    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-    (void)dtd;
-    assert(dtd.tableType == 1);
-    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#else
-    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
-                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
-#endif
+    return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+                           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
 }
 
 size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
@@ -1216,17 +1090,7 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds
     if (cSrcSize == 0) return ERROR(corruption_detected);
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-#if defined(HUF_FORCE_DECOMPRESS_X1)
-        (void)algoNb;
-        assert(algoNb == 0);
-        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
-#elif defined(HUF_FORCE_DECOMPRESS_X2)
-        (void)algoNb;
-        assert(algoNb == 1);
-        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
-#else
-        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
-                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
-#endif
+        return algoNb ? HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
+                        HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
     }
 }
diff --git a/vendor/github.com/DataDog/zstd/mem.h b/vendor/github.com/DataDog/zstd/mem.h
index 5da248756ff..47d2300177c 100644
--- a/vendor/github.com/DataDog/zstd/mem.h
+++ b/vendor/github.com/DataDog/zstd/mem.h
@@ -39,10 +39,6 @@ extern "C" {
 #  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
 #endif
 
-#ifndef __has_builtin
-#  define __has_builtin(x) 0  /* compat. with non-clang compilers */
-#endif
-
 /* code only tested on 32 and 64 bits systems */
 #define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
 MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
@@ -61,23 +57,11 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
   typedef uint64_t U64;
   typedef  int64_t S64;
 #else
-# include <limits.h>
-#if CHAR_BIT != 8
-#  error "this implementation requires char to be exactly 8-bit type"
-#endif
   typedef unsigned char      BYTE;
-#if USHRT_MAX != 65535
-#  error "this implementation requires short to be exactly 16-bit type"
-#endif
   typedef unsigned short      U16;
   typedef   signed short      S16;
-#if UINT_MAX != 4294967295
-#  error "this implementation requires int to be exactly 32-bit type"
-#endif
   typedef unsigned int        U32;
   typedef   signed int        S32;
-/* note : there are no limits defined for long long type in C90.
- * limits exist in C99, however, in such case, <stdint.h> is preferred */
   typedef unsigned long long  U64;
   typedef   signed long long  S64;
 #endif
@@ -202,8 +186,7 @@ MEM_STATIC U32 MEM_swap32(U32 in)
 {
 #if defined(_MSC_VER)     /* Visual Studio */
     return _byteswap_ulong(in);
-#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
-  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
     return __builtin_bswap32(in);
 #else
     return  ((in << 24) & 0xff000000 ) |
@@ -217,8 +200,7 @@ MEM_STATIC U64 MEM_swap64(U64 in)
 {
 #if defined(_MSC_VER)     /* Visual Studio */
     return _byteswap_uint64(in);
-#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
-  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
     return __builtin_bswap64(in);
 #else
     return  ((in << 56) & 0xff00000000000000ULL) |
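The mem.h hunks drop the clang `__has_builtin` probe and keep only the GCC version check; compilers matching neither branch fall through to the plain-C swap. A standalone sketch of that fallback pattern, runnable as-is (names are illustrative, not the vendored ones):

```c
/* Hedged sketch: portable 32-bit byte swap, the same mask-and-shift
 * pattern as mem.h's #else branch. Compilers with __builtin_bswap32
 * would normally take the intrinsic path instead. */
#include <stdint.h>
#include <stdio.h>

static uint32_t swap32_portable(uint32_t in)
{
    return ((in << 24) & 0xff000000u) |
           ((in <<  8) & 0x00ff0000u) |
           ((in >>  8) & 0x0000ff00u) |
           ((in >> 24) & 0x000000ffu);
}

int main(void)
{
    printf("%08x\n", swap32_portable(0x12345678u));  /* prints 78563412 */
    return 0;
}
```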
diff --git a/vendor/github.com/DataDog/zstd/pool.c b/vendor/github.com/DataDog/zstd/pool.c
index 7a829454328..773488b0725 100644
--- a/vendor/github.com/DataDog/zstd/pool.c
+++ b/vendor/github.com/DataDog/zstd/pool.c
@@ -10,10 +10,9 @@
 
 
 /* ======   Dependencies   ======= */
-#include <stddef.h>    /* size_t */
-#include "debug.h"     /* assert */
-#include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */
+#include <stddef.h>  /* size_t */
 #include "pool.h"
+#include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */
 
 /* ======   Compiler specifics   ====== */
 #if defined(_MSC_VER)
@@ -34,9 +33,8 @@ typedef struct POOL_job_s {
 struct POOL_ctx_s {
     ZSTD_customMem customMem;
     /* Keep track of the threads */
-    ZSTD_pthread_t* threads;
-    size_t threadCapacity;
-    size_t threadLimit;
+    ZSTD_pthread_t *threads;
+    size_t numThreads;
 
     /* The queue is a circular buffer */
     POOL_job *queue;
@@ -60,10 +58,10 @@ struct POOL_ctx_s {
 };
 
 /* POOL_thread() :
- * Work thread for the thread pool.
- * Waits for jobs and executes them.
- * @returns : NULL on failure else non-null.
- */
+   Work thread for the thread pool.
+   Waits for jobs and executes them.
+   @returns : NULL on failure else non-null.
+*/
 static void* POOL_thread(void* opaque) {
     POOL_ctx* const ctx = (POOL_ctx*)opaque;
     if (!ctx) { return NULL; }
@@ -71,55 +69,50 @@ static void* POOL_thread(void* opaque) {
         /* Lock the mutex and wait for a non-empty queue or until shutdown */
         ZSTD_pthread_mutex_lock(&ctx->queueMutex);
 
-        while ( ctx->queueEmpty
-            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
-            if (ctx->shutdown) {
-                /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit),
-                 * a few threads will be shutdown while !queueEmpty,
-                 * but enough threads will remain active to finish the queue */
-                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-                return opaque;
-            }
+        while (ctx->queueEmpty && !ctx->shutdown) {
             ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
         }
+        /* empty => shutting down: so stop */
+        if (ctx->queueEmpty) {
+            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+            return opaque;
+        }
         /* Pop a job off the queue */
         {   POOL_job const job = ctx->queue[ctx->queueHead];
             ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
             ctx->numThreadsBusy++;
             ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
             /* Unlock the mutex, signal a pusher, and run the job */
-            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
             ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
 
             job.function(job.opaque);
 
             /* If the intended queue size was 0, signal after finishing job */
-            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-            ctx->numThreadsBusy--;
             if (ctx->queueSize == 1) {
+                ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+                ctx->numThreadsBusy--;
+                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                 ZSTD_pthread_cond_signal(&ctx->queuePushCond);
-            }
-            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-        }
-    }  /* for (;;) */
-    assert(0);  /* Unreachable */
+    }   }   }  /* for (;;) */
+    /* Unreachable */
 }
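The worker restored above is the textbook condition-variable consumer: sleep while the queue is empty, re-check the predicate after every wakeup, and exit only when "empty" coincides with "shutdown". A self-contained sketch of that shape with plain pthreads rather than the ZSTD_pthread wrappers; the single-slot queue is a deliberate simplification, not how the vendored pool stores jobs:

```c
/* Hedged sketch: condition-variable worker loop mirroring POOL_thread. */
#include <pthread.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t  popCond;
    int             queueEmpty;   /* toggled by the producer side */
    int             shutdown;     /* set once, under the mutex */
    void          (*job)(void*);  /* single-slot "queue" for brevity */
    void*           jobArg;
} worker_ctx;

static void* worker(void* opaque) {
    worker_ctx* const ctx = (worker_ctx*)opaque;
    for (;;) {
        pthread_mutex_lock(&ctx->mutex);
        while (ctx->queueEmpty && !ctx->shutdown)
            pthread_cond_wait(&ctx->popCond, &ctx->mutex);  /* releases mutex while waiting */
        if (ctx->queueEmpty) {             /* empty + shutdown => exit */
            pthread_mutex_unlock(&ctx->mutex);
            return opaque;
        }
        {   void (*fn)(void*) = ctx->job;  /* take the job under the lock */
            void* const arg = ctx->jobArg;
            ctx->queueEmpty = 1;
            pthread_mutex_unlock(&ctx->mutex);
            fn(arg);                       /* run it unlocked */
        }
    }
}
```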
 
 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
     return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
 }
 
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
-                               ZSTD_customMem customMem) {
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
     POOL_ctx* ctx;
-    /* Check parameters */
+    /* Check the parameters */
     if (!numThreads) { return NULL; }
     /* Allocate the context and zero initialize */
     ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
     if (!ctx) { return NULL; }
     /* Initialize the job queue.
-     * It needs one extra space since one space is wasted to differentiate
-     * empty and full queues.
+     * It needs one extra space since one space is wasted to differentiate empty
+     * and full queues.
      */
     ctx->queueSize = queueSize + 1;
     ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
@@ -133,7 +126,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
     ctx->shutdown = 0;
     /* Allocate space for the thread handles */
     ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
-    ctx->threadCapacity = 0;
+    ctx->numThreads = 0;
     ctx->customMem = customMem;
     /* Check for errors */
     if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
@@ -141,12 +134,11 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
     {   size_t i;
         for (i = 0; i < numThreads; ++i) {
             if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
-                ctx->threadCapacity = i;
+                ctx->numThreads = i;
                 POOL_free(ctx);
                 return NULL;
         }   }
-        ctx->threadCapacity = numThreads;
-        ctx->threadLimit = numThreads;
+        ctx->numThreads = numThreads;
     }
     return ctx;
 }
@@ -164,8 +156,8 @@ static void POOL_join(POOL_ctx* ctx) {
     ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
     /* Join all of the threads */
     {   size_t i;
-        for (i = 0; i < ctx->threadCapacity; ++i) {
-            ZSTD_pthread_join(ctx->threads[i], NULL);  /* note : could fail */
+        for (i = 0; i < ctx->numThreads; ++i) {
+            ZSTD_pthread_join(ctx->threads[i], NULL);
     }   }
 }
 
@@ -180,68 +172,24 @@ void POOL_free(POOL_ctx *ctx) {
     ZSTD_free(ctx, ctx->customMem);
 }
 
-
-
 size_t POOL_sizeof(POOL_ctx *ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
     return sizeof(*ctx)
         + ctx->queueSize * sizeof(POOL_job)
-        + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
-}
-
-
-/* @return : 0 on success, 1 on error */
-static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
-{
-    if (numThreads <= ctx->threadCapacity) {
-        if (!numThreads) return 1;
-        ctx->threadLimit = numThreads;
-        return 0;
-    }
-    /* numThreads > threadCapacity */
-    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
-        if (!threadPool) return 1;
-        /* replace existing thread pool */
-        memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
-        ZSTD_free(ctx->threads, ctx->customMem);
-        ctx->threads = threadPool;
-        /* Initialize additional threads */
-        {   size_t threadId;
-            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
-                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
-                    ctx->threadCapacity = threadId;
-                    return 1;
-            }   }
-    }   }
-    /* successfully expanded */
-    ctx->threadCapacity = numThreads;
-    ctx->threadLimit = numThreads;
-    return 0;
-}
-
-/* @return : 0 on success, 1 on error */
-int POOL_resize(POOL_ctx* ctx, size_t numThreads)
-{
-    int result;
-    if (ctx==NULL) return 1;
-    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-    result = POOL_resize_internal(ctx, numThreads);
-    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
-    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-    return result;
+        + ctx->numThreads * sizeof(ZSTD_pthread_t);
 }
 
 /**
  * Returns 1 if the queue is full and 0 otherwise.
  *
- * When queueSize is 1 (pool was created with an intended queueSize of 0),
- * then a queue is empty if there is a thread free _and_ no job is waiting.
+ * If the queueSize is 1 (the pool was created with an intended queueSize of 0),
+ * then a queue is empty if there is a thread free and no job is waiting.
  */
 static int isQueueFull(POOL_ctx const* ctx) {
     if (ctx->queueSize > 1) {
         return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
     } else {
-        return (ctx->numThreadsBusy == ctx->threadLimit) ||
+        return ctx->numThreadsBusy == ctx->numThreads ||
                !ctx->queueEmpty;
     }
 }
 
@@ -315,11 +263,6 @@ void POOL_free(POOL_ctx* ctx) {
     (void)ctx;
 }
 
-int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
-    (void)ctx; (void)numThreads;
-    return 0;
-}
-
 void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
     (void)ctx;
     function(opaque);
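End of pool.c: this older pool no longer supports resizing, but the public surface in the pool.h hunk below is otherwise unchanged. A hedged sketch of how a caller drives it through those declarations; the increment job and the thread/queue counts are illustrative:

```c
/* Hedged sketch: using the pool via the declarations in pool.h below. */
#include <stdio.h>
#include "pool.h"

static void increment(void* opaque) {
    ++*(int*)opaque;   /* safe here: each job gets its own counter */
}

int main(void) {
    int counters[8] = {0};
    POOL_ctx* const pool = POOL_create(4 /* threads */, 8 /* queue slots */);
    if (pool == NULL) return 1;
    {   size_t i;
        for (i = 0; i < 8; ++i)
            POOL_add(pool, increment, &counters[i]);  /* may block while the queue is full */
    }
    POOL_free(pool);   /* in this version, queued jobs drain before the workers are joined */
    printf("%d\n", counters[0]);
    return 0;
}
```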
diff --git a/vendor/github.com/DataDog/zstd/pool.h b/vendor/github.com/DataDog/zstd/pool.h
index 458d37f13c3..a57e9b4fabc 100644
--- a/vendor/github.com/DataDog/zstd/pool.h
+++ b/vendor/github.com/DataDog/zstd/pool.h
@@ -30,50 +30,40 @@ typedef struct POOL_ctx_s POOL_ctx;
 */
 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
 
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
-                               ZSTD_customMem customMem);
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);
 
 /*! POOL_free() :
- *  Free a thread pool returned by POOL_create().
- */
+    Free a thread pool returned by POOL_create().
+*/
 void POOL_free(POOL_ctx* ctx);
 
-/*! POOL_resize() :
- *  Expands or shrinks pool's number of threads.
- *  This is more efficient than releasing + creating a new context,
- *  since it tries to preserve and re-use existing threads.
- * `numThreads` must be at least 1.
- * @return : 0 when resize was successful,
- *           !0 (typically 1) if there is an error.
- *    note : only numThreads can be resized, queueSize remains unchanged.
- */
-int POOL_resize(POOL_ctx* ctx, size_t numThreads);
-
 /*! POOL_sizeof() :
- * @return threadpool memory usage
- *  note : compatible with NULL (returns 0 in this case)
- */
+    return memory usage of pool returned by POOL_create().
+*/
 size_t POOL_sizeof(POOL_ctx* ctx);
 
 /*! POOL_function :
- *  The function type that can be added to a thread pool.
- */
+    The function type that can be added to a thread pool.
+*/
 typedef void (*POOL_function)(void*);
+/*! POOL_add_function :
+    The function type for a generic thread pool add function.
+*/
+typedef void (*POOL_add_function)(void*, POOL_function, void*);
 
 /*! POOL_add() :
- *  Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
- *  Possibly blocks until there is room in the queue.
- *  Note : The function may be executed asynchronously,
- *         therefore, `opaque` must live until function has been completed.
- */
+    Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
+    Possibly blocks until there is room in the queue.
+    Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
+*/
 void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
 
 
 /*! POOL_tryAdd() :
- *  Add the job `function(opaque)` to thread pool _if_ a worker is available.
- *  Returns immediately even if not (does not block).
- * @return : 1 if successful, 0 if not.
- */
+    Add the job `function(opaque)` to the thread pool if a worker is available.
+    return immediately otherwise.
+   @return : 1 if successful, 0 if not.
+*/
 int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
 
diff --git a/vendor/github.com/DataDog/zstd/xxhash.c b/vendor/github.com/DataDog/zstd/xxhash.c
index 532b8161929..9d9c0e963cb 100644
--- a/vendor/github.com/DataDog/zstd/xxhash.c
+++ b/vendor/github.com/DataDog/zstd/xxhash.c
@@ -98,7 +98,6 @@
 /* Modify the local functions below should you wish to use some other memory routines */
 /* for malloc(), free() */
 #include <stdlib.h>
-#include <stddef.h>   /* size_t */
 static void* XXH_malloc(size_t s) { return malloc(s); }
 static void  XXH_free  (void* p)  { free(p); }
 /* for memcpy() */
diff --git a/vendor/github.com/DataDog/zstd/zdict.c b/vendor/github.com/DataDog/zstd/zdict.c
index c753da0dbbf..7d24e499181 100644
--- a/vendor/github.com/DataDog/zstd/zdict.c
+++ b/vendor/github.com/DataDog/zstd/zdict.c
@@ -255,15 +255,15 @@ static dictItem ZDICT_analyzePos(
     }
 
     {   int i;
-        U32 mml;
+        U32 searchLength;
         U32 refinedStart = start;
         U32 refinedEnd = end;
 
         DISPLAYLEVEL(4, "\n");
-        DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u  ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);
+        DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u  ", (U32)(end-start), MINMATCHLENGTH, (U32)pos);
         DISPLAYLEVEL(4, "\n");
 
-        for (mml = MINMATCHLENGTH ; ; mml++) {
+        for (searchLength = MINMATCHLENGTH ; ; searchLength++) {
             BYTE currentChar = 0;
             U32 currentCount = 0;
             U32 currentID = refinedStart;
@@ -271,13 +271,13 @@ static dictItem ZDICT_analyzePos(
             U32 selectedCount = 0;
             U32 selectedID = currentID;
             for (id =refinedStart; id < refinedEnd; id++) {
-                if (b[suffix[id] + mml] != currentChar) {
+                if (b[suffix[id] + searchLength] != currentChar) {
                     if (currentCount > selectedCount) {
                         selectedCount = currentCount;
                         selectedID = currentID;
                     }
                     currentID = id;
-                    currentChar = b[ suffix[id] + mml];
+                    currentChar = b[ suffix[id] + searchLength];
                     currentCount = 0;
                 }
                 currentCount ++;
@@ -293,7 +293,7 @@ static dictItem ZDICT_analyzePos(
             refinedEnd = refinedStart + selectedCount;
         }
 
-        /* evaluate gain based on new dict */
+        /* evaluate gain based on new ref */
         start = refinedStart;
         pos = suffix[refinedStart];
         end = start;
@@ -341,8 +341,8 @@ static dictItem ZDICT_analyzePos(
         for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
             savings[i] = savings[i-1] + (lengthList[i] * (i-3));
 
-        DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f)  \n",
-                     (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);
+        DISPLAYLEVEL(4, "Selected ref at position %u, of length %u : saves %u (ratio: %.2f)  \n",
+                     (U32)pos, (U32)maxLength, savings[maxLength], (double)savings[maxLength] / maxLength);
 
         solution.pos = (U32)pos;
         solution.length = (U32)maxLength;
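For context on the hunks above: ZDICT_analyzePos refines a range of sorted suffixes by repeatedly keeping the largest group that agrees on one more byte (`searchLength` is that depth). A standalone sketch of the inner counting step, with hypothetical names; the vendored code additionally tracks where the winning group starts:

```c
/* Hedged sketch: among suffixes suffix[start..end) that already share
 * `len` bytes, find the byte that most often follows. */
#include <stddef.h>

typedef unsigned char BYTE;

static size_t dominantNextChar(const BYTE* b, const int* suffix,
                               size_t start, size_t end, size_t len,
                               BYTE* outChar)
{
    size_t best = 0, run = 0;
    BYTE cur = 0;
    size_t id;
    for (id = start; id < end; id++) {
        BYTE const c = b[suffix[id] + len];   /* suffixes are sorted, so equal bytes are adjacent */
        if (id == start || c != cur) {        /* a new group begins */
            if (run > best) { best = run; *outChar = cur; }
            cur = c;
            run = 0;
        }
        run++;
    }
    if (run > best) { best = run; *outChar = cur; }
    return best;   /* size of the largest group sharing len+1 bytes */
}
```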
@@ -497,7 +497,7 @@ static U32 ZDICT_dictSize(const dictItem* dictList)
 
 static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
                             const void* const buffer, size_t bufferSize,   /* buffer must end with noisy guard band */
                             const size_t* fileSizes, unsigned nbFiles,
-                            unsigned minRatio, U32 notificationLevel)
+                            U32 minRatio, U32 notificationLevel)
 {
     int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
     int* const suffix = suffix0+1;
@@ -523,11 +523,11 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
     memset(doneMarks, 0, bufferSize+16);
 
     /* limit sample set size (divsufsort limitation)*/
-    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));
+    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (U32)(ZDICT_MAX_SAMPLES_SIZE>>20));
     while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];
 
     /* sort */
-    DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));
+    DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (U32)(bufferSize>>20));
     {   int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
         if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
     }
@@ -581,7 +581,7 @@ static void ZDICT_fillNoise(void* buffer, size_t length)
 
 typedef struct
 {
-    ZSTD_CDict* dict;    /* dictionary */
+    ZSTD_CCtx* ref;    /* contains reference to dictionary */
     ZSTD_CCtx* zc;     /* working context */
     void* workPlace;   /* must be ZSTD_BLOCKSIZE_MAX allocated */
 } EStats_ress_t;
@@ -589,7 +589,7 @@ typedef struct
 #define MAXREPOFFSET 1024
 
 static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
-                              unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
+                              U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets,
                               const void* src, size_t srcSize,
                               U32 notificationLevel)
 {
@@ -597,12 +597,11 @@ static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
     size_t cSize;
 
     if (srcSize > blockSizeMax) srcSize = blockSizeMax;   /* protection vs large samples */
-    {   size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict);
-        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }
-
+    {   size_t const errorCode = ZSTD_copyCCtx(esr.zc, esr.ref, 0);
+        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; }
     }
     cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
-    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }
+    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
 
     if (cSize) {  /* if == 0; block is not compressible */
         const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
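The EStats hunks revert from a CDict to a dictionary-primed reference context that is cloned per sample, so the dictionary is parsed once rather than per block. A minimal sketch of that pattern under ZSTD_STATIC_LINKING_ONLY, following only the calls visible in this patch; the helper name is invented and error handling is trimmed:

```c
/* Hedged sketch of the reverted EStats setup: clone the dictionary-primed
 * reference context, then compress one sample as a single block. */
#define ZSTD_STATIC_LINKING_ONLY   /* assumed gate for ZSTD_copyCCtx/ZSTD_compressBlock */
#include "zstd.h"

static size_t statOneSample(ZSTD_CCtx* ref, ZSTD_CCtx* zc,
                            void* workPlace, size_t workSize,
                            const void* sample, size_t sampleSize)
{
    /* restore the dictionary-loaded state without re-parsing the dictionary;
     * ref was primed earlier, e.g. via ZSTD_compressBegin_advanced() as below */
    size_t const err = ZSTD_copyCCtx(zc, ref, 0 /* pledgedSrcSize unknown */);
    if (ZSTD_isError(err)) return err;
    /* compress the sample as one block; entropy stats are read from zc afterwards */
    return ZSTD_compressBlock(zc, workPlace, workSize, sample, sampleSize);
}
```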
@@ -671,7 +670,7 @@ static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val,
  * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
  * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
  */
-static void ZDICT_flatLit(unsigned* countLit)
+static void ZDICT_flatLit(U32* countLit)
 {
     int u;
     for (u=1; u<256; u++) countLit[u] = 2;
@@ -687,18 +686,18 @@ static size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,
                                    const void*  dictBuffer, size_t  dictBufferSize,
                                    unsigned notificationLevel)
 {
-    unsigned countLit[256];
+    U32 countLit[256];
     HUF_CREATE_STATIC_CTABLE(hufTable, 255);
-    unsigned offcodeCount[OFFCODE_MAX+1];
+    U32 offcodeCount[OFFCODE_MAX+1];
     short offcodeNCount[OFFCODE_MAX+1];
     U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
-    unsigned matchLengthCount[MaxML+1];
+    U32 matchLengthCount[MaxML+1];
     short matchLengthNCount[MaxML+1];
-    unsigned litLengthCount[MaxLL+1];
+    U32 litLengthCount[MaxLL+1];
     short litLengthNCount[MaxLL+1];
     U32 repOffset[MAXREPOFFSET];
     offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
-    EStats_ress_t esr = { NULL, NULL, NULL };
+    EStats_ress_t esr;
     ZSTD_parameters params;
     U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
     size_t pos = 0, errorCode;
@@ -709,6 +708,14 @@ static size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,
 
     /* init */
     DEBUGLOG(4, "ZDICT_analyzeEntropy");
+    esr.ref = ZSTD_createCCtx();
+    esr.zc = ZSTD_createCCtx();
+    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
+    if (!esr.ref || !esr.zc || !esr.workPlace) {
+        eSize = ERROR(memory_allocation);
+        DISPLAYLEVEL(1, "Not enough memory \n");
+        goto _cleanup;
+    }
     if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; }   /* too large dictionary */
     for (u=0; u<256; u++) countLit[u] = 1;   /* any character must be described */
     for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
@@ -717,17 +724,14 @@ static size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,
     memset(repOffset, 0, sizeof(repOffset));
     repOffset[1] = repOffset[4] = repOffset[8] = 1;
     memset(bestRepOffset, 0, sizeof(bestRepOffset));
-    if (compressionLevel==0) compressionLevel = g_compressionLevel_default;
+    if (compressionLevel<=0) compressionLevel = g_compressionLevel_default;
     params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
-
-    esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);
-    esr.zc = ZSTD_createCCtx();
-    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
-    if (!esr.dict || !esr.zc || !esr.workPlace) {
-        eSize = ERROR(memory_allocation);
-        DISPLAYLEVEL(1, "Not enough memory \n");
-        goto _cleanup;
-    }
+    {   size_t const beginResult = ZSTD_compressBegin_advanced(esr.ref, dictBuffer, dictBufferSize, params, 0);
+        if (ZSTD_isError(beginResult)) {
+            DISPLAYLEVEL(1, "error : ZSTD_compressBegin_advanced() failed : %s \n", ZSTD_getErrorName(beginResult));
+            eSize = ERROR(GENERIC);
+            goto _cleanup;
+    }   }
 
     /* collect stats on all samples */
     for (u=0; u